diff --git a/go.mod b/go.mod
index 612ae884..23f41b96 100644
--- a/go.mod
+++ b/go.mod
@@ -1,14 +1,14 @@
 module github.com/neuvector/sigstore-interface

-go 1.22.0
+go 1.23.4

 toolchain go1.24.1

 require (
-	github.com/google/go-containerregistry v0.19.1
-	github.com/sigstore/cosign/v2 v2.2.4
-	github.com/sigstore/rekor v1.3.6
-	github.com/sigstore/sigstore v1.8.3
+	github.com/google/go-containerregistry v0.20.3
+	github.com/sigstore/cosign/v2 v2.4.3
+	github.com/sigstore/rekor v1.3.9
+	github.com/sigstore/sigstore v1.8.15
 	github.com/sirupsen/logrus v1.9.3
 	github.com/theupdateframework/go-tuf v0.7.0
 )
@@ -19,20 +19,20 @@ require (
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
 	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
-	github.com/docker/cli v24.0.7+incompatible // indirect
+	github.com/docker/cli v27.5.0+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v27.1.2+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.0 // indirect
+	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/fsnotify/fsnotify v1.8.0 // indirect
 	github.com/go-chi/chi v4.1.2+incompatible // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.4 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
@@ -47,7 +47,7 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/certificate-transparency-go v1.1.8 // indirect
+	github.com/google/certificate-transparency-go v1.3.1 // indirect
 	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
 	github.com/google/go-github/v55 v55.0.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
@@ -57,18 +57,19 @@ require (
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
+	github.com/in-toto/attestation v1.1.0 // indirect
 	github.com/in-toto/in-toto-golang v0.9.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect
-	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
+	github.com/magiconair/properties v1.8.9 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -77,45 +78,49 @@ require (
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/sassoftware/relic v7.2.1+incompatible // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/sigstore/timestamp-authority v1.2.2 // indirect
+	github.com/sigstore/protobuf-specs v0.4.0 // indirect
+	github.com/sigstore/sigstore-go v0.7.0 // indirect
+	github.com/sigstore/timestamp-authority v1.2.4 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
-	github.com/spf13/cast v1.6.0 // indirect
-	github.com/spf13/cobra v1.8.0 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.18.2 // indirect
+	github.com/spf13/cast v1.7.0 // indirect
+	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/spf13/viper v1.19.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 	github.com/thales-e-security/pool v0.0.2 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.0.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/transparency-dev/merkle v0.0.2 // indirect
-	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/xanzy/go-gitlab v0.102.0 // indirect
+	github.com/vbatts/tar-split v0.11.6 // indirect
+	gitlab.com/gitlab-org/api/client-go v0.123.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
-	go.opentelemetry.io/otel v1.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/trace v1.24.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/otel v1.34.0 // indirect
+	go.opentelemetry.io/otel/metric v1.34.0 // indirect
+	go.opentelemetry.io/otel/trace v1.34.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
-	golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/oauth2 v0.19.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/time v0.5.0 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
-	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
+	golang.org/x/crypto v0.33.0 // indirect
+	golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
+	golang.org/x/mod v0.22.0 // indirect
+	golang.org/x/net v0.35.0 // indirect
+	golang.org/x/oauth2 v0.26.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/term v0.29.0 // indirect
+	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/time v0.10.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -123,9 +128,9 @@ require (
 	k8s.io/api v0.28.3 // indirect
 	k8s.io/apimachinery v0.30.1 // indirect
 	k8s.io/client-go v0.28.3 // indirect
-	k8s.io/klog/v2 v2.120.1 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index 82124a3f..a7bccce0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,34 +1,40 @@
-cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
-cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
-cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
-cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs=
-cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e h1:GwCVItFUPxwdsEYnlUcJ6PJxOjTeFFCKOh6QWg4oAzQ=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e/go.mod h1:ApHceQLLwcOkCEXM1+DyCXTHEJhNGDpJ2kmV6axsx24=
-cuelang.org/go v0.8.1 h1:VFYsxIFSPY5KgSaH1jQ2GxHOrbu6Ga3kEI70yCZwnOg=
-cuelang.org/go v0.8.1/go.mod h1:CoDbYolfMms4BhWUlhD+t5ORnihR7wvjcfgyO9lL5FI=
+cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
+cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
+cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0=
+cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM=
+cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
+cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
+cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
+cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA=
+cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY=
+cloud.google.com/go/kms v1.20.5 h1:aQQ8esAIVZ1atdJRxihhdxGQ64/zEbJoJnCz/ydSmKg=
+cloud.google.com/go/kms v1.20.5/go.mod h1:C5A8M1sv2YWYy1AE6iSrnddSG9lRGdJq5XEdBy28Lmw=
+cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc=
+cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 h1:mRwydyTyhtRX2wXS3mqYWzR2qlv6KsmoKXmlz5vInjg=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg=
+cuelang.org/go v0.12.0 h1:q4W5I+RtDIA27rslQyyt6sWkXX0YS9qm43+U1/3e0kU=
+cuelang.org/go v0.12.0/go.mod h1:B4+kjvGGQnbkz+GuAv1dq/R308gTkp0sO28FdMrJ2Kw=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
 github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
-github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
-github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.0 h1:7rKG7UmnrxX4N53TFhkYqjc+kVUZuw0fL8I3Fh+Ld9E=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.0/go.mod h1:Wjo+24QJVhhl/L7jy6w9yzFF2yDOf3cKECAa8ecf9vE=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 h1:eXnN9kaS8TiDwXjoie3hMRLuwdUBUMW9KRgOqB3mCaw=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0/go.mod h1:XIpam8wumeZ5rVMuhdDQLMfIPDf1WO3IzrCRO3e3e3o=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
@@ -45,18 +51,18 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
 github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
 github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
 github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
-github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
-github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
+github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
 github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
@@ -79,61 +85,63 @@ github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOq
 github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
 github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0=
 github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
-github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28=
-github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0=
+github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g=
+github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU=
-github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
-github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg=
-github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
-github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
-github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E=
+github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
+github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg=
+github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.40.3 h1:a+210FCU/pR5hhKRaskRfX/ogcyyzFBrehcTk5DTAyU=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.40.3/go.mod h1:dtD3a4sjUjVL86e0NUvaqdGvds5ED6itUiZPDaT+Gh8=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.31.2 h1:E6/Myrj9HgLF22medmDrKmbpm4ULsa+cIBNx3phirBk=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.31.2/go.mod h1:OQ8NALFcchBJ/qruak6zKUQodovnTKKaReTuCkc5/9Y=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE=
+github.com/aws/aws-sdk-go-v2/service/kms v1.37.18 h1:pi9M/9n1PLayBXjia7LfwgXwcpFdFO7Q2cqKOZa1ZmM=
+github.com/aws/aws-sdk-go-v2/service/kms v1.37.18/go.mod h1:vZXvmzfhdsPj/axc8+qk/2fSCP4hGyaZ1MAduWEHAxM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc=
+github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
+github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 h1:50sS0RWhGpW/yZx2KcDNEb1u1MANv5BMEkJgcieEDTA=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1/go.mod h1:ErZOtbzuHabipRTDTor0inoRlYwbsV1ovwSxjGs/uJo=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70=
-github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM=
-github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM=
-github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA=
-github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE=
-github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4=
+github.com/buildkite/agent/v3 v3.92.1 h1:6HLdDbU5z6ZyJ3TCt/UQEcLv2nhg/gdS4ApnsrUwhOE=
+github.com/buildkite/agent/v3 v3.92.1/go.mod h1:mUNebi1cYh66iBjqVdJTgEn+sm53x8zC/XQQfpZSk9A=
+github.com/buildkite/go-pipeline v0.13.3 h1:llI7sAdZ7sqYE7r8ePlmDADRhJ1K0Kua2+gv74Z9+Es=
+github.com/buildkite/go-pipeline v0.13.3/go.mod h1:1uC2XdHkTV1G5jYv9K8omERIwrsYbBruBrPx1Zu1uFw=
+github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU=
+github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc=
+github.com/buildkite/roko v1.3.1 h1:t7K30ceLLYn6k7hQP4oq1c7dVlhgD5nRcuSRDEEnY1s=
+github.com/buildkite/roko v1.3.1/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I=
 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
-github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -150,15 +158,15 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
 github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
-github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
-github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
+github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
 github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
-github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
+github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs=
+github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -170,20 +178,18 @@ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1G
 github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
-github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM=
+github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY=
-github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
-github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emicklei/proto v1.12.1 h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE=
-github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
+github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g=
+github.com/emicklei/proto v1.13.4/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -193,19 +199,19 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
 github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
 github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
-github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
-github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
@@ -228,25 +234,25 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
 github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
-github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg=
-github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
+github.com/go-piv/piv-go/v2 v2.3.0 h1:kKkrYlgLQTMPA6BiSL25A7/x4CEh2YCG7rtb/aTkx+g=
+github.com/go-piv/piv-go/v2 v2.3.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
-github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -260,8 +266,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to=
-github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8=
+github.com/google/certificate-transparency-go v1.3.1 h1:akbcTfQg0iZlANZLn0L9xOeWtyCIdeoYhKrqi5iH3Go=
+github.com/google/certificate-transparency-go v1.3.1/go.mod h1:gg+UQlx6caKEDQ9EElFOujyxEQEfOiQzAt6782Bvi8k=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -272,8 +278,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
+github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
 github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg=
 github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -282,20 +288,20 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=
-github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w=
 github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM=
-github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4=
-github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI=
+github.com/google/trillian v1.7.1 h1:+zX8jLM3524bAMPS+VxaDIDgsMv3/ty6DuLWerHXcek=
+github.com/google/trillian v1.7.1/go.mod h1:E1UMAHqpZCA8AQdrKdWmHmtUfSeiD0sDWD1cv00Xa+c=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
-github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
@@ -316,26 +322,41 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
 github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU=
 github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
 github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE=
-github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE=
+github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4=
+github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA=
 github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
 github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q=
+github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs=
 github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
 github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0=
+github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
+github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
-github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE=
-github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
 github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -344,18 +365,18 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
+github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
+github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -369,26 +390,27 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
-github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
+github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw=
+github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
 github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM=
-github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60=
+github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo=
+github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
@@ -403,8 +425,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
 github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
-github.com/open-policy-agent/opa v0.63.0 h1:ztNNste1v8kH0/vJMJNquE45lRvqwrM5mY9Ctr9xIXw=
-github.com/open-policy-agent/opa v0.63.0/go.mod h1:9VQPqEfoB2N//AToTxzZ1pVTVPUoF2Mhd64szzjWPpU=
+github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
+github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -413,8 +435,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
 github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -423,20 +445,20 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
-github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
-github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk=
-github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d h1:HWfigq7lB31IeJL8iy7jkUmU/PG1Sr8jVGhS749dbUA=
+github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a h1:w3tdWGKbLGBPtR/8/oO74W6hmz0qE5q0z9aqSAewaaM=
+github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a/go.mod h1:S8kfXMp+yh77OxPD4fdM6YUknrZpQxLhvxzS4gDHENY=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
@@ -448,30 +470,36 @@ github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGq
github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= -github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= -github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= -github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= -github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc= -github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= -github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= +github.com/sigstore/cosign/v2 v2.4.3 h1:UAU/6Z33gVBCV01b2l1fdvMml9IJTrsDiYQDB5K+sQI= +github.com/sigstore/cosign/v2 v2.4.3/go.mod h1:6vZ2vHarfJB3N4FHYV/5M1qdHiWi2PM1c8ogNPCe2jA= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.0 h1:yoZbdh0kZYKOSiVbYyA8J3f2wLh5aUk2SQB7LgAfIdU= +github.com/sigstore/protobuf-specs v0.4.0/go.mod 
h1:FKW5NYhnnFQ/Vb9RKtQk91iYd0MKJ9AxyqInEwU6+OI= +github.com/sigstore/rekor v1.3.9 h1:sUjRpKVh/hhgqGMs0t+TubgYsksArZ6poLEC3MsGAzU= +github.com/sigstore/rekor v1.3.9/go.mod h1:xThNUhm6eNEmkJ/SiU/FVU7pLY2f380fSDZFsdDWlcM= +github.com/sigstore/sigstore v1.8.15 h1:9HHnZmxjPQSTPXTCZc25HDxxSTWwsGMh/ZhWZZ39maU= +github.com/sigstore/sigstore v1.8.15/go.mod h1:+Wa5mrG6A+Gss516YC9owy10q3IazqIRe0y1EoQRHHM= +github.com/sigstore/sigstore-go v0.7.0 h1:bIGPc2IbnbxnzlqQcKlh1o96bxVJ4yRElpP1gHrOH48= +github.com/sigstore/sigstore-go v0.7.0/go.mod h1:4RrCK+i+jhx7lyOG2Vgef0/kFLbKlDI1hrioUYvkxxA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.15 h1:g/hPoaemFv/6ZJIRyb5I1lA4qU9PZwCTu/GkvFV5jEw= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.15/go.mod h1:n2yKi/b29+JB54PyONruHvvha4zugC7jzr+A16cNLvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.15 h1:K2GstKWXftcpmg/wHfcJFYKWuj+YRSoTgwxm3ox2FjE= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.15/go.mod h1:tOSdKYXCkplk54FSR/58UYQm1S/GlQK4Y1GgMhiq40U= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.15 h1:ThpZMfR2TecI6Ji7s/nFlcCIkwXYhZUYziJdZs3pOaw= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.15/go.mod h1:x+4wvq6tzIQRZaSdMS6/VT9nuCoepypozfzP4Tqwnqw= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.15 h1:mR+VaOSx2sUpaE8lXarinHcT8UXi+fKE4ESNBzDRAtQ= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.15/go.mod h1:6olKNL2BGrsZPLbO/7kiJzZPxU74270nDI5G3HSSykw= +github.com/sigstore/timestamp-authority v1.2.4 h1:RjXZxOWorEiem/uSr0pFHVtQpyzpcFxgugo5jVqm3mw= +github.com/sigstore/timestamp-authority v1.2.4/go.mod h1:ExrbobKdEuwuBptZIiKp1IaVBRiUeKbiuSyZTO8Okik= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= @@ -480,48 +508,49 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/spiffe/go-spiffe/v2 v2.2.0 h1:9Vf06UsvsDbLYK/zJ4sYsIsHmMFknUD+feA7IYoWMQY= -github.com/spiffe/go-spiffe/v2 v2.2.0/go.mod h1:Urzb779b3+IwDJD2ZbN8fVl3Aa8G4N/PiUe6iXC0XxU= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 
h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.0.2 h1:PyNnjV9BJNzN1ZE6BcWK+5JbF+if370jjzO84SS+Ebo= +github.com/theupdateframework/go-tuf/v2 v2.0.2/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go/v2 v2.3.0 
h1:4/TA0lw0lA/iVKBL9f8R5eP7397bfc4antAMXF5JRhs= +github.com/tink-crypto/tink-go/v2 v2.3.0/go.mod h1:kfPOtXIadHlekBTeBtJrHWqoGL+Fm3JQg0wtltPuxLU= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4= -github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= +github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -533,26 +562,28 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= -github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= -github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +gitlab.com/gitlab-org/api/client-go v0.123.0 h1:W3LZ5QNyiSCJA0Zchkwz8nQIUzOuDoSWMZtRDT5DjPI= +gitlab.com/gitlab-org/api/client-go v0.123.0/go.mod h1:Jh0qjLILEdbO6z/OY94RD+3NDQRUKiuFSFYozN6cpKM= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric 
v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk= -go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.step.sm/crypto v0.57.0 h1:YjoRQDaJYAxHLVwjst0Bl0xcnoKzVwuHCJtEo2VSHYU= +go.step.sm/crypto v0.57.0/go.mod h1:+Lwp5gOVPaTa3H/Ul/TzGbxQPXZZcKIUGMS0lG6n9Go= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -565,16 +596,16 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 h1:mchzmB1XO2pMaKFRqk/+MV3mgGG96aqaPXaMifQU47w= -golang.org/x/exp v0.0.0-20231108232855-2478ac86f678/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp 
v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -589,18 +620,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -623,15 +654,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -639,10 +670,10 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -650,23 +681,23 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/api v0.221.0 h1:qzaJfLhDsbMeFee8zBRdt/Nc+xmOuafD/dbdgGfutOU= +google.golang.org/api v0.221.0/go.mod h1:7sOU2+TL4TxUTdbi0gWgAIg7tH5qBXxoyhtL+9x3biQ= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -675,14 +706,12 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= -gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -706,16 +735,16 @@ k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= -sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s= +sigs.k8s.io/release-utils v0.11.0 h1:FUVSw2dO67M7mfcQx9AITEGnTHoBOdJNbbQ3FT3o8mA= +sigs.k8s.io/release-utils v0.11.0/go.mod h1:wAlXz8xruzvqZUsorI64dZ3lbkiDnYSlI4IYC6l2yEA= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index b071cea5..6aba0ef1 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -436,9 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err != nil { if err == io.EOF { break - } else { - return nil, 
fmt.Errorf("failed to parse tar file, %w", err) } + return nil, fmt.Errorf("failed to parse tar file, %w", err) } switch cleanEntryName(h.Name) { case PrefetchLandmark, NoPrefetchLandmark: diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 0ca6fd75..ba650b4d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -26,12 +26,13 @@ import ( "archive/tar" "bytes" "compress/gzip" + "crypto/rand" "crypto/sha256" "encoding/json" "errors" "fmt" "io" - "math/rand" + "math/big" "os" "path/filepath" "reflect" @@ -45,10 +46,6 @@ import ( digest "github.com/opencontainers/go-digest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression @@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } if sampleEntry == nil { t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + return } if targetEntry == nil { t.Fatalf("rewrite target not found") + return } targetEntry.Offset = sampleEntry.Offset }, @@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX func randomContents(n int) string { b := make([]rune, n) for i := range b { - b[i] = runes[rand.Intn(len(runes))] + bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) + if err != nil { + panic(err) + } + b[i] = runes[int(bi.Int64())] } return string(b) } diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 483743c9..ad1abd49 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -2,6 +2,7 @@ # This file lists all contributors to the repository. # See scripts/docs/generate-authors.sh to make modifications. +A. Lester Buck III Aanand Prasad Aaron L. 
Xu Aaron Lehmann @@ -16,6 +17,7 @@ Adolfo Ochagavía Adrian Plata Adrien Duermael Adrien Folie +Adyanth Hosavalike Ahmet Alp Balkan Aidan Feldman Aidan Hobson Sayers @@ -24,9 +26,10 @@ Akhil Mohan Akihiro Suda Akim Demaille Alan Thompson +Alano Terblanche Albert Callarisa Alberto Roura -Albin Kerouanton +Albin Kerouanton Aleksa Sarai Aleksander Piotrowski Alessandro Boch @@ -34,6 +37,7 @@ Alex Couture-Beil Alex Mavrogiannis Alex Mayer Alexander Boyd +Alexander Chneerov Alexander Larsson Alexander Morozov Alexander Ryabov @@ -41,6 +45,7 @@ Alexandre González Alexey Igrychev Alexis Couvreur Alfred Landrum +Ali Rostami Alicia Lauerman Allen Sun Alvin Deng @@ -61,6 +66,7 @@ Andrew Hsu Andrew Macpherson Andrew McDonnell Andrew Po +Andrew-Zipperer Andrey Petrov Andrii Berehuliak André Martins @@ -79,7 +85,9 @@ Arko Dasgupta Arnaud Porterie Arnaud Rebillout Arthur Peka +Ashly Mathew Ashwini Oruganti +Aslam Ahemad Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -98,7 +106,9 @@ Bill Wang Bin Liu Bingshen Wang Bishal Das +Bjorn Neergaard Boaz Shuster +Boban Acimovic Bogdan Anton Boris Pruessmann Brad Baker @@ -109,17 +119,20 @@ Brent Salisbury Bret Fisher Brian (bex) Exelbierd Brian Goff +Brian Tracy Brian Wieder Bruno Sousa Bryan Bess Bryan Boreham Bryan Murphy bryfry +Calvin Liu Cameron Spear Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +Casey Korver Ce Gao Cedric Davies Cezar Sa Espinola @@ -136,6 +149,7 @@ Chen Chuanliang Chen Hanxiao Chen Mingjie Chen Qiu +Chris Chinchilla Chris Couzens Chris Gavin Chris Gibson @@ -150,6 +164,8 @@ Christophe Vidal Christopher Biscardi Christopher Crone Christopher Jones +Christopher Petito <47751006+krissetto@users.noreply.github.com> +Christopher Petito Christopher Svensson Christy Norman Chun Chen @@ -163,6 +179,8 @@ Conner Crosby Corey Farrell Corey Quon Cory Bennet +Cory Snider +Craig Osterhout Craig Wilhite Cristian Staretu Daehyeok Mun @@ -171,6 +189,7 @@ Daisuke Ito dalanlan Damien Nadé Dan Cotora +Danial Gharib Daniel Artine Daniel Cassidy Daniel Dao @@ -199,6 +218,7 @@ David Cramer David Dooling David Gageot David Karlsson +David le Blanc David Lechner David Scott David Sheets @@ -210,6 +230,7 @@ Denis Defreyne Denis Gladkikh Denis Ollier Dennis Docter +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Derek McGowan Des Preston Deshi Xiao @@ -232,11 +253,13 @@ DongGeon Lee Doug Davis Drew Erny Ed Costello +Ed Morley <501702+edmorley@users.noreply.github.com> Elango Sivanandam Eli Uriegas Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> +Eric Bode Eric Curtin Eric Engestrom Eric G. 
Noriega @@ -254,6 +277,7 @@ Eugene Yakubovich Evan Allrich Evan Hazlett Evan Krall +Evan Lezar Evelyn Xu Everett Toews Fabio Falci @@ -275,11 +299,13 @@ Frederik Nordahl Jul Sabroe Frieder Bluemle Gabriel Gore Gabriel Nicolas Avellaneda +Gabriela Georgieva Gaetan de Villele Gang Qiao Gary Schaetz Genki Takiuchi George MacRorie +George Margaritis George Xie Gianluca Borello Gildas Cuisinier @@ -288,6 +314,8 @@ Gleb Stsenov Goksu Toprak Gou Rao Govind Rai +Grace Choi +Graeme Wiebe Grant Reaber Greg Pflaum Gsealy @@ -311,6 +339,7 @@ Hernan Garcia Hongbin Lu Hu Keping Huayi Zhang +Hugo Chastel Hugo Gabriel Eyherabide huqun Huu Nguyen @@ -329,9 +358,12 @@ Ivan Grund Ivan Markin Jacob Atzen Jacob Tomlinson +Jacopo Rigoli Jaivish Kothari Jake Lambert Jake Sanders +Jake Stokes +Jakub Panek James Nesbitt James Turnbull Jamie Hannaford @@ -363,6 +395,7 @@ Jezeniel Zapanta Jian Zhang Jie Luo Jilles Oldenbeuving +Jim Chen Jim Galasyn Jim Lin Jimmy Leger @@ -393,6 +426,7 @@ John Willis Jon Johnson Jon Zeolla Jonatas Baldin +Jonathan A. Sternberg Jonathan Boulle Jonathan Lee Jonathan Lomas @@ -408,10 +442,12 @@ Josh Chorlton Josh Hawn Josh Horwitz Josh Soref +Julian Julien Barbier Julien Kassar Julien Maitrehenry Justas Brazauskas +Justin Chadwell Justin Cormack Justin Simonelis Justyn Temme @@ -434,7 +470,7 @@ Kelton Bassingthwaite Ken Cochrane Ken ICHIKAWA Kenfe-Mickaël Laventure -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Feyrer Kevin Kern @@ -445,6 +481,7 @@ Kevin Woblick khaled souf Kim Eik Kir Kolyshkin +Kirill A. Korinsky Kotaro Yoshimatsu Krasi Georgiev Kris-Mikael Krister @@ -454,6 +491,7 @@ Kyle Mitofsky Lachlan Cooper Lai Jiangshan Lars Kellogg-Stedman +Laura Brehm Laura Frank Laurent Erignoux Lee Gaines @@ -462,10 +500,10 @@ Lennie Leo Gallucci Leonid Skorospelov Lewis Daly +Li Fu Bang Li Yi Li Yi Liang-Chi Hsieh -Lifubang Lihua Tang Lily Guo Lin Lu @@ -480,6 +518,7 @@ Louis Opter Luca Favatella Luca Marturana Lucas Chan +Luis Henrique Mulinari Luka Hartwig Lukas Heeren Lukasz Zajaczkowski @@ -498,10 +537,12 @@ mapk0y Marc Bihlmaier Marc Cornellà Marco Mariani +Marco Spiess Marco Vedovati Marcus Martins Marianna Tessel Marius Ileana +Marius Meschter Marius Sturm Mark Oates Marsh Macy @@ -510,6 +551,7 @@ Mary Anthony Mason Fish Mason Malone Mateusz Major +Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com> Mathieu Champlon Mathieu Rollet Matt Gucci @@ -519,9 +561,11 @@ Matthew Heon Matthieu Hauglustaine Mauro Porras P Max Shytikov +Max-Julian Pogner Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao +Melroy van den Berg Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith @@ -581,6 +625,7 @@ Nathan McCauley Neil Peterson Nick Adcock Nick Santos +Nick Sieger Nico Stapelbroek Nicola Kabar Nicolas Borboën @@ -593,6 +638,7 @@ Nishant Totla NIWA Hideyuki Noah Treuhaft O.S. 
Tezer +Oded Arbel Odin Ugedal ohmystack OKA Naoya @@ -604,19 +650,21 @@ Otto Kekäläinen Ovidio Mallo Pascal Borreli Patrick Böänziger +Patrick Daigle <114765035+pdaig@users.noreply.github.com> Patrick Hemmer Patrick Lang Paul Paul Kehrer Paul Lietar Paul Mulders +Paul Seyfert Paul Weaver Pavel Pospisil Paweł Gronowski Paweł Pokrywka Paweł Szczekutowicz Peeyush Gupta -Per Lundberg +Per Lundberg Peter Dave Hello Peter Edge Peter Hsu @@ -639,6 +687,7 @@ Preston Cowley Pure White Qiang Huang Qinglan Peng +QQ喵 qudongfang Raghavendra K T Rahul Kadyan @@ -657,6 +706,7 @@ Rick Wieman Ritesh H Shukla Riyaz Faizullabhoy Rob Gulewich +Rob Murray Robert Wallis Robin Naundorf Robin Speekenbrink @@ -670,6 +720,7 @@ Rory Hunter Ross Boucher Rubens Figueiredo Rui Cao +Rui JingAn Ryan Belgrave Ryan Detzel Ryan Stelly @@ -689,6 +740,7 @@ Sandro Jäckel Santhosh Manohar Sargun Dhillon Saswat Bhattacharya +Saurabh Kumar Scott Brenner Scott Collier Sean Christopherson @@ -762,6 +814,7 @@ Tim Hockin Tim Sampson Tim Smith Tim Waugh +Tim Welsh Tim Wraight timfeirg Timothy Hobbs @@ -788,6 +841,7 @@ uhayate Ulrich Bareth Ulysses Souza Umesh Yadav +Vaclav Struhar Valentin Lorentz Vardan Pogosian Venkateswara Reddy Bukkasamudram @@ -795,6 +849,7 @@ Veres Lajos Victor Vieux Victoria Bialas Viktor Stanchev +Ville Skyttä Vimal Raghubir Vincent Batts Vincent Bernat @@ -831,6 +886,7 @@ Yong Tang Yosef Fertel Yu Peng Yuan Sun +Yucheng Wu Yue Zhang Yunxiang Huang Zachary Romero @@ -842,9 +898,11 @@ Zhang Wei Zhang Wentao ZhangHang zhenghenghuo +Zhiwei Liang Zhou Hao Zhoulin Xie Zhu Guihua +Zhuo Zhi Álex González Álvaro Lázaro Átila Camurça Alves diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE index 58b19b6d..1c40faae 100644 --- a/vendor/github.com/docker/cli/NOTICE +++ b/vendor/github.com/docker/cli/NOTICE @@ -14,6 +14,6 @@ United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. -For more information, please see https://www.bis.doc.gov +For more information, see https://www.bis.doc.gov See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index b7c05c3f..5a518432 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -4,44 +4,38 @@ import ( "fmt" "io" "os" + "os/user" "path/filepath" + "runtime" "strings" "sync" "github.com/docker/cli/cli/config/configfile" "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/pkg/homedir" "github.com/pkg/errors" ) const ( - // ConfigFileName is the name of config file + // EnvOverrideConfigDir is the name of the environment variable that can be + // used to override the location of the client configuration files (~/.docker). + // + // It takes priority over the default, but can be overridden by the "--config" + // command line option. + EnvOverrideConfigDir = "DOCKER_CONFIG" + + // ConfigFileName is the name of the client configuration file inside the + // config-directory. 
ConfigFileName = "config.json" configFileDir = ".docker" - oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning contextsDir = "contexts" ) var ( initConfigDir = new(sync.Once) configDir string - homeDir string ) -// resetHomeDir is used in testing to reset the "homeDir" package variable to -// force re-lookup of the home directory between tests. -func resetHomeDir() { - homeDir = "" -} - -func getHomeDir() string { - if homeDir == "" { - homeDir = homedir.Get() - } - return homeDir -} - // resetConfigDir is used in testing to reset the "configDir" package variable // and its sync.Once to force re-lookup between tests. func resetConfigDir() { @@ -49,19 +43,40 @@ func resetConfigDir() { initConfigDir = new(sync.Once) } -func setConfigDir() { - if configDir != "" { - return - } - configDir = os.Getenv("DOCKER_CONFIG") - if configDir == "" { - configDir = filepath.Join(getHomeDir(), configFileDir) +// getHomeDir returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +// +// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker +// as dependency for consumers that only need to read the config-file. +// +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get +func getHomeDir() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } } + return home } // Dir returns the directory the configuration file is stored in func Dir() string { - initConfigDir.Do(setConfigDir) + initConfigDir.Do(func() { + configDir = os.Getenv(EnvOverrideConfigDir) + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } + }) return configDir } @@ -72,6 +87,8 @@ func ContextStoreDir() string { // SetDir sets the directory the configuration file is stored in func SetDir(dir string) { + // trigger the sync.Once to synchronise with Dir() + initConfigDir.Do(func() {}) configDir = filepath.Clean(dir) } @@ -85,7 +102,7 @@ func Path(p ...string) (string, error) { } // LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader +// a reader. It returns an error if configData is malformed. func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { configFile := configfile.ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), @@ -94,57 +111,59 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { return &configFile, err } -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. -// FIXME: use the internal golang config parser +// Load reads the configuration file ([ConfigFileName]) from the given directory. +// If no directory is given, it uses the default [Dir]. A [*configfile.ConfigFile] +// is returned containing the contents of the configuration file, or a default +// struct if no configfile exists in the given location. 
+// +// Load returns an error if a configuration file exists in the given location, +// but cannot be read, or is malformed. Consumers must handle errors to prevent +// overwriting an existing configuration file. func Load(configDir string) (*configfile.ConfigFile, error) { - cfg, _, err := load(configDir) - return cfg, err -} - -// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file -// so we can remove the bool return value and collapse this back into `Load` -func load(configDir string) (*configfile.ConfigFile, bool, error) { - printLegacyFileWarning := false - if configDir == "" { configDir = Dir() } + return load(configDir) +} +func load(configDir string) (*configfile.ConfigFile, error) { filename := filepath.Join(configDir, ConfigFileName) configFile := configfile.New(filename) - // Try happy path first - latest config file - if file, err := os.Open(filename); err == nil { - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Wrap(err, filename) + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + // It is OK for no configuration file to be present, in which + // case we return a default struct. + return configFile, nil } - return configFile, printLegacyFileWarning, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) + // Any other error happening when failing to read the file must be returned. + return configFile, errors.Wrap(err, "loading config file") } - - // Can't find latest config file so check for the old one - filename = filepath.Join(getHomeDir(), oldConfigfile) - if _, err := os.Stat(filename); err == nil { - printLegacyFileWarning = true + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrapf(err, "loading config file: %s: ", filename) } - return configFile, printLegacyFileWarning, nil + return configFile, err } // LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. +// a reference to the ConfigFile struct. If none is found or when failing to load +// the configuration file, it initializes a default ConfigFile struct. If no +// credentials-store is set in the configuration file, it attempts to discover +// the default store to use for the current platform. +// +// Important: LoadDefaultConfigFile prints a warning to stderr when failing to +// load the configuration file, but otherwise ignores errors. Consumers should +// consider using [Load] (and [credentials.DetectDefaultStore]) to detect errors +// when updating the configuration file, to prevent discarding a (malformed) +// configuration file. 
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, printLegacyFileWarning, err := load(Dir()) + configFile, err := load(Dir()) if err != nil { - fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) - } - if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") + // FIXME(thaJeztah): we should not proceed here to prevent overwriting existing (but malformed) config files; see https://github.com/docker/cli/issues/5075 + _, _ = fmt.Fprintln(stderr, "WARNING: Error", err) } if !configFile.ContainsAuth() { configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index 442c3111..ae9dcb33 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -41,6 +41,7 @@ type ConfigFile struct { CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` + Features map[string]string `json:"features,omitempty"` } // ProxyConfig contains proxy configuration settings @@ -302,6 +303,7 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, for registryHostname := range configFile.CredentialHelpers { newAuth, err := configFile.GetAuthConfig(registryHostname) if err != nil { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) continue } diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go index 35388754..06b811e7 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package configfile diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go index 402235bf..a36afc41 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -1,21 +1,22 @@ package credentials -import ( - exec "golang.org/x/sys/execabs" -) +import "os/exec" // DetectDefaultStore return the default credentials store for the platform if -// the store executable is available. +// no user-defined store is passed, and the store executable is available. 
func DetectDefaultStore(store string) string { - platformDefault := defaultCredentialsStore() - - // user defined or no default for platform - if store != "" || platformDefault == "" { + if store != "" { + // use user-defined return store } - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { - return platformDefault + platformDefault := defaultCredentialsStore() + if platformDefault == "" { + return "" + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil { + return "" } - return "" + return platformDefault } diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go index c9630ea5..40c16eb8 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin && !linux -// +build !windows,!darwin,!linux package credentials diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index ea30fc30..95406281 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -1,6 +1,8 @@ package credentials import ( + "net" + "net/url" "strings" "github.com/docker/cli/cli/config/types" @@ -23,8 +25,13 @@ func NewFileStore(file store) Store { return &fileStore{file: file} } -// Erase removes the given credentials from the file store. +// Erase removes the given credentials from the file store.This function is +// idempotent and does not update the file if credentials did not change. func (c *fileStore) Erase(serverAddress string) error { + if _, exists := c.file.GetAuthConfigs()[serverAddress]; !exists { + // nothing to do; no credentials found for the given serverAddress + return nil + } delete(c.file.GetAuthConfigs(), serverAddress) return c.file.Save() } @@ -50,9 +57,14 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { return c.file.GetAuthConfigs(), nil } -// Store saves the given credentials in the file store. +// Store saves the given credentials in the file store. This function is +// idempotent and does not update the file if credentials did not change. func (c *fileStore) Store(authConfig types.AuthConfig) error { authConfigs := c.file.GetAuthConfigs() + if oldAuthConfig, ok := authConfigs[authConfig.ServerAddress]; ok && oldAuthConfig == authConfig { + // Credentials didn't change, so skip updating the configuration file. + return nil + } authConfigs[authConfig.ServerAddress] = authConfig return c.file.Save() } @@ -68,14 +80,17 @@ func (c *fileStore) IsFileStore() bool { // ConvertToHostname converts a registry url which has http|https prepended // to just an hostname. // Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. 
-func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") +func ConvertToHostname(maybeURL string) string { + stripped := maybeURL + if strings.Contains(stripped, "://") { + u, err := url.Parse(stripped) + if err == nil && u.Hostname() != "" { + if u.Port() == "" { + return u.Hostname() + } + return net.JoinHostPort(u.Hostname(), u.Port()) + } } - hostName, _, _ := strings.Cut(stripped, "/") return hostName } diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go index f9619b03..b9af145b 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -51,6 +51,7 @@ func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { auth.Username = creds.Username auth.IdentityToken = creds.IdentityToken auth.Password = creds.Password + auth.ServerAddress = creds.ServerAddress return auth, nil } @@ -76,6 +77,9 @@ func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { ac.Username = creds.Username ac.Password = creds.Password ac.IdentityToken = creds.IdentityToken + if ac.ServerAddress == "" { + ac.ServerAddress = creds.ServerAddress + } authConfigs[registry] = ac } diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index 678153cf..7ca5ab72 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -16,11 +16,9 @@ func isValidCredsMessage(msg string) error { if credentials.IsCredentialsMissingServerURLMessage(msg) { return credentials.NewErrCredentialsMissingServerURL() } - if credentials.IsCredentialsMissingUsernameMessage(msg) { return credentials.NewErrCredentialsMissingUsername() } - return nil } @@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } return nil @@ -55,17 +50,15 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { + if credentials.IsErrCredentialsNotFoundMessage(string(out)) { return nil, credentials.NewErrCredentialsNotFound() } - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } resp := &credentials.Credentials{ diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go 
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
index 8fa4d5d2..2283d5a4 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -1,6 +1,9 @@
 package credentials
 
-import "errors"
+import (
+	"errors"
+	"strings"
+)
 
 const (
 	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
@@ -47,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool {
 // This function helps to check messages returned by an
 // external program via its standard output.
 func IsErrCredentialsNotFoundMessage(err string) bool {
-	return err == errCredentialsNotFoundMessage
+	return strings.TrimSpace(err) == errCredentialsNotFoundMessage
 }
 
 // errCredentialsMissingServerURL represents an error raised
@@ -104,7 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool {
 // IsCredentialsMissingServerURLMessage checks for an
 // errCredentialsMissingServerURL in the error message.
 func IsCredentialsMissingServerURLMessage(err string) bool {
-	return err == errCredentialsMissingServerURLMessage
+	return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage
 }
 
 // IsCredentialsMissingUsername returns true if the error
@@ -117,5 +120,5 @@ func IsCredentialsMissingUsername(err error) bool {
 // IsCredentialsMissingUsernameMessage checks for an
 // errCredentialsMissingUsername in the error message.
 func IsCredentialsMissingUsernameMessage(err string) bool {
-	return err == errCredentialsMissingUsernameMessage
+	return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage
 }
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
deleted file mode 100644
index 5f93eeb4..00000000
--- a/vendor/github.com/docker/docker/AUTHORS
+++ /dev/null
@@ -1,2456 +0,0 @@
-# File @generated by hack/generate-authors.sh. DO NOT EDIT.
-# This file lists all contributors to the repository.
-# See hack/generate-authors.sh to make modifications.
-
-Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L.
Xu -Aaron Lehmann -Aaron Welch -Aaron Yoshitake -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Abirdcfly -Ada Mancini -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Thornton -Adam Walz -Adam Williams -AdamKorcz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Akshay Moghe -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Alano Terblanche -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alec Benson -Alejandro González Hevia -Aleksa Sarai -Aleksandr Chebotov -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Nordlund -Alex Olshansky -Alex Samorukov -Alex Stockinger -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Polakov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis Ries -Alexis Thomas -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Ameya Gawde -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Ushakov -Andrei Vagin -Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kim -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Lindeman -Andy Rothfusz -Andy Smith -Andy Wilson -Andy Zhang -Aneesh Kulkarni -Anes Hasicic -Angel Velazquez -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Aguilar -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anuj Varma -Anusha Ragunathan -Anyu Wang -apocas -Arash Deshmeh -arcosx -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Artem Khramov -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -Austin Vazquez -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -Azat Khuyiyakhmetov -Bao Yonglei -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -Bastien Pascard -bdevloed -Bearice Ren -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Langfeld -Ben Lovy -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Böhmke -Benjamin Wang -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Billy Ridgway -Bily Zhang -Bin Liu -Bingshen Wang -Bjorn Neergaard -Blake Geno -Boaz Shuster -bobby abbott -Bojun Zhu -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brennan Kinney <5098581+polarathene@users.noreply.github.com> -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Milford -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Calvin Liu -Cameron Boehmer -Cameron Sparr -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. 
Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chee Hau Lim -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -Chentianze -Chenyang Yan -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -Chiranjeevi Tirunagari -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris Kreussling (Flatbush Gardener) -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Becker -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christoph Ziebuhr -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Petito -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clint Armstrong -Clinton Kitson -clubby789 -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Conor Evans -Corbin Coleman -Corey Farrell -Cory Forsyth -Cory Snider -cressie176 -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -cui fliter -CUI Wei -Cuong Manh Le -Cyprian Gracz -Cyril F -Da McGrady -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Plamadeala -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel P. Berrangé -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniele Rondina -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Bellotti -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Karlsson <35727626+dvdksn@users.noreply.github.com> -David Lawrence -David Lechner -David M. Karr -David Mackey -David Manouchehri -David Mat -David Mcanulty -David McKay -David O'Rourke -David P Hilton -David Pelaez -David R. 
Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Dhilip Kumars -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -dingwei -Diogo Monica -DiuDiugirl -Djibril Koné -Djordje Lukic -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Dorin Geman -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Koromilas -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Eng Zer Jun -Enguerran -Enrico Weigelt, metux IT consult -Eohyung Lee -epeterso -er0k -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Mountain -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erich Cordoba -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Sipsma -Erik Sjölund -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Espen Suenson -Ethan Bell -Ethan Mosbaugh -Euan Harris -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Lezar -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Feroz Salam -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Filipe Pina -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Degrassi -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Frank Villaro-Dixon -Frank Yang -Fred Lifton -Frederick F. Kautz IV -Frederico F. 
de Oliveira -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -frobnicaty <92033765+frobnicaty@users.noreply.github.com> -Frédéric Dalleau -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Adrian Samfira -Gabriel Goller -Gabriel L. Somlo -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gabriel Tomitsuka -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoff Levand -Geoffrey Bachelet -Geon Kim -George Kontridze -George Ma -George MacRorie -George Xie -Georgi Hristozov -Georgy Yakovlev -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Giuseppe Scrivano -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> -Guoqiang QI -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -haining.cao -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -Hsing-Yu (David) Chen -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huajin Tong -huang-jl <1046678590@qq.com> -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Barrera -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hui Kang -Hunter Blanks -huqun -Huu Nguyen -Hyeongkyu Lee -Hyzhou Zhy -Iago López Galeiras -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Illia Antypenko -Illo Abdulrahim -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imalasong <2879499479@qq.com> -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jack Walker <90711509+j2walker@users.noreply.github.com> -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -Jakub Drahos -Jakub Guzik -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Sanders -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Breig -Jan Chren -Jan Garcia -Jan Götte -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslav Jindrak -Jaroslaw Zabiello -Jasmine Hegman -Jason A. 
Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Jasper Siepkes -Javier Bassi -jaxgeller -Jay -Jay Kamat -Jay Lim -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Michel Rouet -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeff Zvier -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeongseok Kang -Jeremy Chambers -Jeremy Grosser -Jeremy Huntwork -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jeyanthinath Muthuram -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zeng -Jian Zhang -Jiang Jinyang -Jianyong Wu -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Carroll -Jim Ehrismann -Jim Galasyn -Jim Lin -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joakim Roubert -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Geiler -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jordi Massaguer Pla -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun Du -Jun-Ru Chang -junxu -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Chadwell -Justin Cormack -Justin Force -Justin Keller <85903732+jk-vb@users.noreply.github.com> -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. 
Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kaijie Chen -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Kazuyoshi Kato -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Bannister -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Alvarez -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -Kirk Easterson -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konrad Ponichtera -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Kostadin Plachkov -kpcyrd -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Squizzato -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Andringa -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Brehm -Laura Frank -Laurent Bernaille -Laurent Erignoux -Laurie Voss -Leandro Motta Barros -Leandro Siqueira -Lee Calcote -Lee Chao <932819864@qq.com> -Lee, Meng-Han -Lei Gong -Lei Jitang -Leiiwang -Len Weincier -Lennie -Leo Gallucci -Leonardo Nodari -Leonardo Taccari -Leszek Kowalski -Levi Blackstone -Levi Gross -Levi Harrison -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liangwei -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limeidan -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luboslav Pivarc -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Henrique Mulinari -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marat Radchenko -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Feit -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark Vainomaa -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Braun -Martin Dojcak -Martin Honermeyer -Martin Jirku 
-Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Maru Newby -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Mathieu Paturel -Matt Apperson -Matt Bachmann -Matt Bajor -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Morrison <3maven@gmail.com> -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Fronton -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Max Timchenko -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Menghui Chen -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Beskin -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Kebe -Michael Kuehn -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael Weidmann -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Kostrzewa -Michal Minář -Michal Rostecki -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michał Kosek -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Miguel Perez -Mihai Borobocea -Mihuleacc Sergiu -Mikael Davranche -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -Mike Sul -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milas Bowman -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohd Sadiq -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Muhammad Zohaib Aslam -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Carlson -Nathan Herald -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. 
Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Santos -Nick Stenning -Nick Stinemates -Nick Wood -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Niel Drummond -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -ningmingxiao -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Nolan Miles -Noriki Nakamura -nponeccop -Nurahmadie -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Onur Filiz -Oriol Francès -Oscar Bonilla <6f6231@gmail.com> -oscar.chen <2972789494@qq.com> -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Haas -Patrick Hemmer -Patrick St. laurent -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul "TBBle" Hampson -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Seiffert -Paul Weaver -Paulo Gomes -Paulo Ribeiro -Pavel Lobashov -Pavel Matěja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Paweł Gronowski -payall4u -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Pete Woods -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Petros Angelatos -Phil -Phil Estes -Phil Sphicas -Phil Spitler -Philip Alexander Etling -Philip K. Warren -Philip Monroe -Philipp Fruck -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -Piotr Karbowski -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Pradipta Kr. 
Banerjee -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Puneet Pruthi -Pure White -pysqz -Qiang Huang -Qin TianHuan -Qinglan Peng -Quan Tian -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rachit Sharma -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -realityone -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Horwood -Rich Moyse -Rich Seymour -Richard Burnison -Richard Hansen -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> -Rob Gulewich -Rob Murray -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Shade -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Campos -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Roman Volosatovs -Roman Zabaluev -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -Rony Weng -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Roy Reznik -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui JingAn -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Barry -Ryan Belgrave -Ryan Campbell -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Shea -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Ryoga Saito -Régis Behmo -Rémy Greinhofer -s. rannou -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Thibault -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -sanchayanghosh -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Moser -Scott Percival -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. 
Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Sebastian Höffner -Sebastian Radloff -Sebastian Thomschke -Sebastien Goasguen -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -Serhii Nakon -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayan Pooya -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shengjing Zhu -Shev Yan -Shih-Yuan Lee -Shihao Xia -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Sian Lerk Lau -Siarhei Rasiukevich -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Smark Meng -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Sotiris Salloumis -Soulou -Spencer Brown -Spencer Smith -Spike Curtis -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan Gehrig -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Steffen Butzer -Stephan Henningsen -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stéphane Este-Gracias -Stig Larsson -Su Wang -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Sören Tempel -Tabakhase -Tadej Janež -Takuto Sato -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -Terry Chu -terryding77 <550147740@qq.com> -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thiago Alves Silva -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Graf -Thomas Grainger -Thomas Hansen -Thomas Ledos -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tiago Seabra -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Claassen -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wagner -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> -timfeirg -Timo Rothenpieler -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Pfandzelter -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Parker -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tom Zhao -Tomas Janousek -Tomas Kral -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tomek Mańko -Tommaso Visconti -Tomoya Tabuchi -Tomáš Hrčka -Tomáš Virtus -tonic -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Toshiaki Makita -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tudor Brindus -Ty Alexander -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -Valentin Kulesh -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Toni -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikas Choudhary -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Anjos -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vladislav Kolesnikov -Vlastimil Zeman -Vojtech Vitek (V-Teq) -voloder <110066198+voloder@users.noreply.github.com> -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wataru Ishida -Wayne Chang -Wayne Song -weebney -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wesley Pettit -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Nagele -Wolfgang Powisch -Wonjun Kim -WuLonghui -xamyzhao -Xia Wu -Xian Chaobo -Xianglin Gao -Xianjie -Xianlu Bird -Xiao YongBiao -Xiao Zhang -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaohua Ding -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -xin.li -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -yalpul -YAMADA Tsuyoshi -Yamasaki Masahide -Yamazaki Masashi -Yan Feng -Yan Zhu -Yang Bai -Yang Li -Yang Pengfei -yangchenliang -Yann Autissier -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai -Youcef YEKHLEF -Youfu Zhang -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yufei Xiong -Yuhao Fang -Yuichiro Kaneko -YujiOshima -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> -Yves Junqueira -Zac Dover -Zach Borboa -Zach Gershman -Zachary Jaffee -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -zhangguanzhang -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -ZhiPeng Lu -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álvaro Lázaro -Átila Camurça Alves -吴小白 <296015668@qq.com> -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 -정재영 diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d..00000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index c0ab3f5b..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,28 +0,0 @@ -package homedir - -import ( - "os" - "os/user" - "runtime" -) - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// On non-Windows platforms, it falls back to nss lookups, if the home -// directory cannot be obtained from environment-variables. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home, _ := os.UserHomeDir() - if home == "" && runtime.GOOS != "windows" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index ded1c7c8..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,105 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. 
-// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// If HOME and XDG_DATA_HOME are not set, getpwent(3) is consulted to determine the users home directory. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// If HOME and XDG_CONFIG_HOME are not set, getpwent(3) is consulted to determine the users home directory. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetLibHome returns $HOME/.local/lib -// If HOME is not set, getpwent(3) is consulted to determine the users home directory. -func GetLibHome() (string, error) { - home := Get() - if home == "" { - return "", errors.New("could not get HOME") - } - return filepath.Join(home, ".local/lib"), nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 4eeb26b5..00000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetRuntimeDir is unsupported on non-linux system. -func GetRuntimeDir() (string, error) { - return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") -} - -// StickRuntimeDirContents is unsupported on non-linux system. -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") -} - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. 
-func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetLibHome is unsupported on non-linux system. -func GetLibHome() (string, error) { - return "", errors.New("homedir.GetLibHome() is not supported on this system") -} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992..f4e7dbf3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076..daea9dd6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575..fa854785 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. 
+1.8.0 2024-10-31
+----------------
+
+### Additions
+
+- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
+
+### Changes and fixes
+
+- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
+
+- kqueue: ignore events with Ident=0 ([#590])
+
+- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
+
+- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
+
+- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
+
+- inotify: fix panic when calling Remove() in a goroutine ([#650])
+
+- fen: allow watching subdirectories of watched directories ([#621])
+
+[#590]: https://github.com/fsnotify/fsnotify/pull/590
+[#610]: https://github.com/fsnotify/fsnotify/pull/610
+[#617]: https://github.com/fsnotify/fsnotify/pull/617
+[#619]: https://github.com/fsnotify/fsnotify/pull/619
+[#620]: https://github.com/fsnotify/fsnotify/pull/620
+[#621]: https://github.com/fsnotify/fsnotify/pull/621
+[#625]: https://github.com/fsnotify/fsnotify/pull/625
+[#650]: https://github.com/fsnotify/fsnotify/pull/650
 
 1.7.0 - 2023-10-22
 ------------------
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index ea379759..e4ac2a2f 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,7 +1,7 @@
 Thank you for your interest in contributing to fsnotify! We try to review and
 merge PRs in a reasonable timeframe, but please be aware that:
 
-- To avoid "wasted" work, please discus changes on the issue tracker first. You
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
   can just send PRs, but they may end up being rejected for one reason or the
   other.
 
@@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like
 
 Use the `-short` flag to make the "stress test" run faster.
 
+Writing new tests
+-----------------
+Scripts in the testdata directory allow creating test cases in a "shell-like"
+syntax. The basic format is:
+
+    script
+
+    Output:
+    desired output
+
+For example:
+
+    # Create a new empty file with some data.
+    watch /
+    echo data >/file
+
+    Output:
+    create /file
+    write  /file
+
+Just create a new file to add a new test; select which tests to run with
+`-run TestScript/[path]`.
+
+script
+------
+The script is a "shell-like" script:
+
+    cmd arg arg
+
+Comments are supported with `#`:
+
+    # Comment
+    cmd arg arg # Comment
+
+All operations are done in a temp directory; a path like "/foo" is rewritten to
+"/tmp/TestFoo/foo".
+
+Arguments can be quoted with `"` or `'`; there are no escapes and they're
+functionally identical right now, but this may change in the future, so best to
+assume shell-like rules.
+
+    touch "/file with spaces"
+
+End-of-line escapes with `\` are not supported.
+
+### Supported commands
+
+    watch path [ops]    # Watch the path, reporting events for it. Nothing is
+                        # watched by default. Optionally a list of ops can be
+                        # given, as with AddWith(path, WithOps(...)).
+    unwatch path        # Stop watching the path.
+    watchlist n         # Assert watchlist length.
+
+    stop                # Stop running the script; for debugging.
+    debug [yes/no]      # Enable/disable FSNOTIFY_DEBUG (tests are run in
+                          parallel by default, so -parallel=1 is probably a good
+                          idea).
+
+    touch path
+    mkdir [-p] dir
+    ln -s target link   # Only ln -s supported.
+    mkfifo path
+    mknod dev path
+    mv src dst
+    rm [-r] path
+    chmod mode path     # Octal only
+    sleep time-in-ms
+
+    cat path            # Read path (does nothing with the data; just reads it).
+    echo str >>path     # Append "str" to "path".
+    echo str >path      # Truncate "path" and write "str".
+
+    require reason      # Skip the test if "reason" is true; "skip" and
+    skip reason         # "require" behave identically; it supports both for
+                        # readability. Possible reasons are:
+                        #
+                        #   always    Always skip this test.
+                        #   symlink   Symlinks are supported (requires admin
+                        #             permissions on Windows).
+                        #   mkfifo    Platform doesn't support FIFO named sockets.
+                        #   mknod     Platform doesn't support device nodes.
+
+
+output
+------
+After `Output:` the desired output is given; this is indented by convention, but
+that's not required.
+
+The format of that is:
+
+    # Comment
+    event  path # Comment
+
+    system:
+    event  path
+    system2:
+    event  path
+
+Every event is one line, and any whitespace between the event and path is
+ignored. The path can optionally be surrounded in ". Anything after a "#" is
+ignored.
+
+Platform-specific tests can be added after GOOS; for example:
+
+    watch /
+    touch /file
+
+    Output:
+    # Tested if nothing else matches
+    create /file
+
+    # Windows-specific test.
+    windows:
+    write /file
+
+You can specify multiple platforms with a comma (e.g. "windows, linux:").
+"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
+
 [goon]: https://github.com/arp242/goon
 [Vagrant]: https://www.vagrantup.com/
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index 28497f1d..c349c326 100644
--- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,8 +1,8 @@
 //go:build solaris
-// +build solaris
 
-// Note: the documentation on the Watcher type and methods is generated from
-// mkdoc.zsh
+// FEN backend for illumos (supported) and Solaris (untested, but should work).
+//
+// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
 
 package fsnotify
 
@@ -12,150 +12,33 @@ import (
 	"os"
 	"path/filepath"
 	"sync"
+	"time"
 
+	"github.com/fsnotify/fsnotify/internal"
 	"golang.org/x/sys/unix"
 )
 
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-//	fp := os.Open("file")
-//	os.Remove("file")  // Triggers Chmod
-//	fp.Close()         // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. 
Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. 
If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. 
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e..36c31169 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. 
-// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). 
+ if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. 
+ // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. 
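+			// (Recursive watches are the exception: for those the watch is
+			// kept, and the MOVED_FROM/MOVED_TO cookie bookkeeping further
+			// down renames the affected child watches instead.)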
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915..d8de5ab7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
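+// kqueue is the backend built on the kqueue(2) syscall: kq is the kqueue
+// descriptor, and the write end of closepipe is closed in Close() to wake up
+// the reader goroutine.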
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
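+// Only paths passed to Watcher.Add() are recorded here; watches created
+// internally (e.g. for files inside a watched directory) are not.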
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
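+// A nil error is treated as already sent, so callers can unconditionally
+// write "if !w.sendError(err) { return }".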
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
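+	// Fail early if the requested ops include events this backend can't
+	// deliver, rather than silently dropping them.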
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
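+// An empty path with a nil error means nothing was watched (sockets and
+// named pipes are skipped).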
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c0..5eb5dbc6 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
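A runnable illustration of the Linux behaviour described in the comment above — Remove is only delivered once all descriptors are closed. Not part of the patch; the path and the sleeps are placeholders to keep the sketch short.

package main

import (
	"log"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	go func() {
		for ev := range w.Events {
			log.Println(ev) // expect CHMOD on remove, REMOVE after close
		}
	}()

	const path = "/tmp/fsnotify-demo"
	if err := os.WriteFile(path, nil, 0o644); err != nil {
		log.Fatal(err)
	}
	if err := w.Add(path); err != nil {
		log.Fatal(err)
	}

	fp, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	os.Remove(path)                    // delivered as Chmod while fp is open
	time.Sleep(100 * time.Millisecond) // crude: give the event time to arrive
	fp.Close()                         // now the Remove event is delivered
	time.Sleep(100 * time.Millisecond)
}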
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
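The limits described above can be checked from Go before adding a large number of watches. A Linux-only sketch; inotifyLimit is a hypothetical helper, not part of fsnotify.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// inotifyLimit reads one of the /proc/sys/fs/inotify files mentioned above.
func inotifyLimit(name string) (int, error) {
	b, err := os.ReadFile("/proc/sys/fs/inotify/" + name)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	for _, n := range []string{"max_user_watches", "max_user_instances"} {
		v, err := inotifyLimit(n)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		fmt.Printf("%s = %d\n", n, v)
	}
}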
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d..c54a6308 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
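Since newBackend in backend_other.go above returns an error, NewWatcher fails at runtime on unsupported platforms. A sketch of degrading to stat-based polling; pollLoop is hypothetical and not part of fsnotify.

package main

import (
	"log"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
)

// pollLoop stats the path periodically and logs modification-time changes.
func pollLoop(path string, every time.Duration) {
	var last time.Time
	for range time.Tick(every) {
		fi, err := os.Stat(path)
		if err != nil {
			continue
		}
		if mt := fi.ModTime(); !mt.Equal(last) {
			last = mt
			log.Println("changed:", path)
		}
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// Reached on the platforms covered by backend_other.go.
		log.Printf("fsnotify unavailable (%v); polling instead", err)
		pollLoop("/tmp/dir", time.Second) // placeholder path
		return
	}
	defer w.Close()
	// ... normal event loop ...
}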
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
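The Add documentation being removed here (it moves to fsnotify.go later in this diff) recommends watching the parent directory rather than individual files, since editors that save atomically replace the file and orphan its watch. A sketch of that pattern; the directory and file names are placeholders.

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file, so atomic saves (write a temp
	// file, rename it over the original) don't orphan the watch.
	if err := w.Add("/etc/myapp"); err != nil {
		log.Fatal(err)
	}
	for ev := range w.Events {
		if filepath.Base(ev.Name) != "config.yaml" {
			continue
		}
		log.Println("config changed:", ev)
	}
}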
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
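The AddWith above rejects buffers smaller than 4096 bytes. A sketch of raising the per-watch ReadDirectoryChangesW buffer for event-heavy directories; the path is a placeholder, and the default is 64K.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// No-op on non-Windows platforms; only the Windows backend uses it.
	err = w.AddWith(`C:\data\hot-dir`, fsnotify.WithBufferSize(256*1024))
	if err != nil {
		log.Fatal(err)
	}
}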
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4..0760efe9 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
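The FSNOTIFY_DEBUG variable documented above is read once at package initialisation (see the debug variable later in this file), so it must be in the environment before the process starts. A sketch of enabling it for a child process; "./myprog" is a placeholder binary that uses fsnotify.

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("./myprog")
	cmd.Env = append(os.Environ(), "FSNOTIFY_DEBUG=1")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr // the FSNOTIFY_DEBUG lines are written to stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}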
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
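A condensed sketch of the dedup approach the Write description above points to (cmd/fsnotify in the upstream repository): coalesce bursts of Write events per path with a timer. The 100ms window and the watched path are placeholders.

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp/dir"); err != nil {
		log.Fatal(err)
	}

	// (Re)arm a timer on every Write and only act once the file has been
	// quiet for 100ms; the map is touched only by this goroutine.
	timers := map[string]*time.Timer{}
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		if t, ok := timers[ev.Name]; ok {
			t.Reset(100 * time.Millisecond)
			continue
		}
		name := ev.Name
		timers[name] = time.AfterFunc(100*time.Millisecond, func() {
			log.Println("settled write:", name)
		})
	}
}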
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000..b0eab100 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000..928319fb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000..3186b0c3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000..f69fdb93 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", 
unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000..607e683b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000..35c734be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000..e5b3b6f6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000..1dd455bc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000..f1b2e73b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000..52bf4ce5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000..547df1df --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000..7daa45e1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000..30976ce9 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000..37dfeddc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000..a72c6495 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6..00000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8..f65e8fe3 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa7..a29fc7aa 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore similarity index 50% rename from vendor/gopkg.in/go-jose/go-jose.v2/.gitignore rename to vendor/github.com/go-jose/go-jose/v4/.gitignore index 95a85158..eb29ebae 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore +++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore @@ -1,8 +1,2 @@ -*~ -.*.swp -*.out -*.test -*.pem -*.cov jose-util/jose-util jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - 
gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md similarity index 84% rename from vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md rename to vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 8e6e9132..6f717dbd 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,27 @@ +# v4.0.4 + +## Fixed + + - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a + breaking change. See #136 / #137. + +# v4.0.3 + +## Changed + + - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) + - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) + - Dependency updates + +# v4.0.2 + +## Changed + + - Improved documentation of Verify() to note that JSONWebKeySet is a supported + argument type (#104) + - Defined exported error values for missing x5c header and unsupported elliptic + curves error cases (#117) + # v4.0.1 ## Fixed @@ -45,12 +69,6 @@ token". [1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf -# v3.0.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. - # v3.0.2 ## Fixed @@ -76,9 +94,3 @@ token". amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the disclosure and to Tom Tervoort for originally publishing the category of attack. 
https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v2.6.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md similarity index 75% rename from vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md rename to vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md index 61b18365..b63e1f8f 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md +++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md @@ -9,6 +9,7 @@ sure all tests pass by running `go test`, and format your code with `go fmt`. We also recommend using `golint` and `errcheck`. Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 +[1]: https://cla-assistant.io/ diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/LICENSE rename to vendor/github.com/go-jose/go-jose/v4/LICENSE diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md new file mode 100644 index 00000000..79a7c5ec --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -0,0 +1,114 @@ +# Go JOSE + +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) +[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +Tables of supported algorithms are shown below. The library supports both +the compact and JWS/JWE JSON Serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) +for dealing with JOSE messages in a shell. 
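[Editor's note: the overview above, from the vendored README, describes JWE/JWS support and the compact serialization. As an illustration only, here is a minimal sketch of a JWE round trip against the v4 API this diff vendors; the key size, algorithm choices, and plaintext are arbitrary.]

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// Throwaway RSA key, for illustration only.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Encrypt to the public key: RSA-OAEP key encryption, AES-128-GCM content encryption.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// v4 parsing requires explicit allow-lists for the "alg" and "enc" headers.
	parsed, err := jose.ParseEncrypted(compact,
		[]jose.KeyAlgorithm{jose.RSA_OAEP},
		[]jose.ContentEncryption{jose.A128GCM},
	)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // Lorem ipsum dolor sit amet
}
```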
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 4](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/main),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+
+    import "github.com/go-jose/go-jose/v4"
+
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still useable but not actively developed anymore.
+
+Version 3, in this repo, is still receiving security fixes but not functionality
+updates.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
+ AES, HMAC                  | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+
+Examples can be found in the Godoc
+reference for this package.
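[Editor's note: complementing the Godoc pointer above, a minimal sketch of the JWK wrapping mentioned under "Supported key types"; the key ID and algorithm/use values here are made up for illustration.]

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Wrapping the raw key in a JSONWebKey attaches a key ID; the "kty"
	// member is derived from the Go type when the JWK is marshaled.
	jwk := jose.JSONWebKey{
		Key:       pub,
		KeyID:     "example-key-1", // hypothetical kid
		Algorithm: string(jose.EdDSA),
		Use:       "sig",
	}
	out, err := json.Marshal(jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"use":"sig","kty":"OKP","kid":"example-key-1",...}
}
```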
The +[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md new file mode 100644 index 00000000..2f18a75a --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go similarity index 99% rename from vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go rename to vendor/github.com/go-jose/go-jose/v4/asymmetric.go index 43f9ce2f..f8d5774e 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go @@ -20,6 +20,7 @@ import ( "crypto" "crypto/aes" "crypto/ecdsa" + "crypto/ed25519" "crypto/rand" "crypto/rsa" "crypto/sha1" @@ -28,9 +29,8 @@ import ( "fmt" "math/big" - "golang.org/x/crypto/ed25519" - josecipher "gopkg.in/go-jose/go-jose.v2/cipher" - "gopkg.in/go-jose/go-jose.v2/json" + josecipher "github.com/go-jose/go-jose/v4/cipher" + "github.com/go-jose/go-jose/v4/json" ) // A generic RSA-based encrypter/verifier diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go similarity index 98% rename from vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go index 87065a5b..af029cec 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go @@ -114,7 +114,7 @@ func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) + buffer := append([]byte{}, ciphertext[:offset]...) 
if len(buffer)%ctx.blockCipher.BlockSize() > 0 { return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go similarity index 97% rename from vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go rename to vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go index 668358f9..b9effbca 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go +++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go @@ -51,7 +51,7 @@ func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { binary.BigEndian.PutUint64(tBytes, uint64(t+1)) for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] + buffer[i] ^= tBytes[i] } copy(r[t%n], buffer[8:]) } @@ -87,7 +87,7 @@ func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { binary.BigEndian.PutUint64(tBytes, uint64(t+1)) for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] + buffer[i] ^= tBytes[i] } copy(buffer[8:], r[t%n]) diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go similarity index 83% rename from vendor/gopkg.in/go-jose/go-jose.v2/crypter.go rename to vendor/github.com/go-jose/go-jose/v4/crypter.go index 0ae2e5eb..d81b03b4 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -21,9 +21,8 @@ import ( "crypto/rsa" "errors" "fmt" - "reflect" - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // Encrypter represents an encrypter which produces an encrypted JWE object. @@ -76,14 +75,24 @@ type recipientKeyInfo struct { type EncrypterOptions struct { Compression CompressionAlgorithm - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. 
+// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { if eo.ExtraHeaders == nil { eo.ExtraHeaders = map[HeaderKey]interface{}{} @@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { // default of 100000 will be used for the count and a 128-bit random salt will // be generated. type Recipient struct { - Algorithm KeyAlgorithm + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. Key interface{} KeyID string PBES2Count int @@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) switch rcpt.Algorithm { case DIRECT: // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + keyBytes, ok := rawKey.([]byte) + if !ok { return nil, ErrUnsupportedKeyType } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + if encrypter.cipher.keySize() != len(keyBytes) { return nil, ErrInvalidKeySize } encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), + key: keyBytes, } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) return encrypter, nil case ECDH_ES: // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { return nil, ErrUnsupportedKeyType } encrypter.keyGenerator = ecKeyGenerator{ size: encrypter.cipher.keySize(), algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), + publicKey: keyDSA, } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -201,7 +221,7 @@ func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *Encrypter if cipher == nil { return nil, ErrUnsupportedAlgorithm } - if rcpts == nil || len(rcpts) == 0 { + if len(rcpts) == 0 { return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") } @@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) } return recipientKeyInfo{}, ErrUnsupportedKeyType } @@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { return newDecrypter(decryptionKey.Key) case *JSONWebKey: return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return 
&opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType } // Implementation of encrypt method producing a JWE object. @@ -403,10 +422,25 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { } } -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient // decryption use DecryptMulti instead. // +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. +// // Automatically decompresses plaintext, but returns an error if the decompressed // data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { @@ -425,7 +459,11 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - decrypter, err := newDecrypter(decryptionKey) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } + decrypter, err := newDecrypter(key) if err != nil { return nil, err } @@ -464,9 +502,12 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } - return plaintext, err + return plaintext, nil } // DecryptMulti decrypts and validates the object and returns the plaintexts, @@ -474,6 +515,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. // +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). +// // Automatically decompresses plaintext, but returns an error if the decompressed // data would be >250kB or >3x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { @@ -488,7 +532,11 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - decrypter, err := newDecrypter(decryptionKey) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } + decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err } @@ -530,13 +578,16 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade } } - if plaintext == nil || err != nil { + if plaintext == nil { return -1, Header{}, nil, ErrCryptoFailure } // The "zip" header parameter may only be present in the protected header. 
if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } sanitized, err := headers.sanitized() diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go similarity index 84% rename from vendor/gopkg.in/go-jose/go-jose.v2/doc.go rename to vendor/github.com/go-jose/go-jose/v4/doc.go index dd1387f3..0ad40ca0 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go +++ b/vendor/github.com/go-jose/go-jose/v4/doc.go @@ -15,13 +15,11 @@ */ /* - Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. - */ package jose diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go similarity index 87% rename from vendor/gopkg.in/go-jose/go-jose.v2/encoding.go rename to vendor/github.com/go-jose/go-jose/v4/encoding.go index 636f6c8f..4f6e0d4a 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go @@ -27,7 +27,7 @@ import ( "strings" "unicode" - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // Helper function to serialize known-good objects. @@ -106,10 +106,7 @@ func inflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) reader := flate.NewReader(bytes.NewBuffer(input)) - maxCompressedSize := 10 * int64(len(input)) - if maxCompressedSize < 250000 { - maxCompressedSize = 250000 - } + maxCompressedSize := max(250_000, 10*int64(len(input))) limit := maxCompressedSize + 1 n, err := io.CopyN(output, reader, limit) @@ -196,3 +193,36 @@ func (b byteBuffer) bigInt() *big.Int { func (b byteBuffer) toInt() int { return int(b.bigInt().Int64()) } + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' 
+ startEncode++ + } + + return string(out) +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE rename to vendor/github.com/go-jose/go-jose/v4/json/LICENSE diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/README.md rename to vendor/github.com/go-jose/go-jose/v4/json/README.md diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go similarity index 99% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go rename to vendor/github.com/go-jose/go-jose/v4/json/decode.go index 4dbc4146..50634dd8 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go +++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go @@ -75,14 +75,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. // Avoids filling out half a data structure diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go similarity index 98% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go rename to vendor/github.com/go-jose/go-jose/v4/json/encode.go index 1dae8bb7..98de68ce 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go +++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go @@ -58,6 +58,7 @@ import ( // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. +// // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name @@ -65,28 +66,28 @@ import ( // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. 
+// Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, @@ -133,7 +134,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. -// func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) @@ -648,7 +648,7 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) { // for large buffers, avoid unnecessary extra temporary // buffer space. enc := base64.NewEncoder(base64.StdEncoding, e) - enc.Write(s) + _, _ = enc.Write(s) enc.Close() } e.WriteByte('"') diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/indent.go b/vendor/github.com/go-jose/go-jose/v4/json/indent.go similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/indent.go rename to vendor/github.com/go-jose/go-jose/v4/json/indent.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/scanner.go b/vendor/github.com/go-jose/go-jose/v4/json/scanner.go similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/scanner.go rename to vendor/github.com/go-jose/go-jose/v4/json/scanner.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/stream.go b/vendor/github.com/go-jose/go-jose/v4/json/stream.go similarity index 99% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/stream.go rename to vendor/github.com/go-jose/go-jose/v4/json/stream.go index 9b2b926b..f03b171e 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/stream.go +++ b/vendor/github.com/go-jose/go-jose/v4/json/stream.go @@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token interface{} const ( diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/tags.go b/vendor/github.com/go-jose/go-jose/v4/json/tags.go similarity index 100% rename from vendor/gopkg.in/go-jose/go-jose.v2/json/tags.go rename to vendor/github.com/go-jose/go-jose/v4/json/tags.go diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go similarity index 64% rename from vendor/gopkg.in/go-jose/go-jose.v2/jwe.go rename to vendor/github.com/go-jose/go-jose/v4/jwe.go index a8966ab8..89f03ee3 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -18,10 +18,11 @@ package jose import ( "encoding/base64" + "errors" "fmt" "strings" - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. 
@@ -86,11 +87,12 @@ func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { func (obj JSONWebEncryption) computeAuthData() []byte { var protected string - if obj.original != nil && obj.original.Protected != nil { + switch { + case obj.original != nil && obj.original.Protected != nil: protected = obj.original.Protected.base64() - } else if obj.protected != nil { + case obj.protected != nil: protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected))) - } else { + default: protected = "" } @@ -103,29 +105,75 @@ func (obj JSONWebEncryption) computeAuthData() []byte { return output } -// ParseEncrypted parses an encrypted message in compact or full serialization format. -func ParseEncrypted(input string) (*JSONWebEncryption, error) { +func containsKeyAlgorithm(haystack []KeyAlgorithm, needle KeyAlgorithm) bool { + for _, algorithm := range haystack { + if algorithm == needle { + return true + } + } + return false +} + +func containsContentEncryption(haystack []ContentEncryption, needle ContentEncryption) bool { + for _, algorithm := range haystack { + if algorithm == needle { + return true + } + } + return false +} + +// ParseEncrypted parses an encrypted message in JWE Compact or JWE JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.1 +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.2 +// +// The keyAlgorithms and contentEncryption parameters are used to validate the "alg" and "enc" +// header parameters respectively. They must be nonempty, and each "alg" or "enc" header in +// parsed data must contain a value that is present in the corresponding parameter. That +// includes the protected and unprotected headers as well as all recipients. To accept +// multiple algorithms, pass a slice of all the algorithms you want to accept. +func ParseEncrypted(input string, + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { input = stripWhitespace(input) if strings.HasPrefix(input, "{") { - return parseEncryptedFull(input) + return ParseEncryptedJSON(input, keyEncryptionAlgorithms, contentEncryption) } - return parseEncryptedCompact(input) + return ParseEncryptedCompact(input, keyEncryptionAlgorithms, contentEncryption) } -// parseEncryptedFull parses a message in compact format. -func parseEncryptedFull(input string) (*JSONWebEncryption, error) { +// ParseEncryptedJSON parses a message in JWE JSON Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.2 +func ParseEncryptedJSON( + input string, + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { var parsed rawJSONWebEncryption err := json.Unmarshal([]byte(input), &parsed) if err != nil { return nil, err } - return parsed.sanitized() + return parsed.sanitized(keyEncryptionAlgorithms, contentEncryption) } // sanitized produces a cleaned-up JWE object from the raw JSON. 
-func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { +func (parsed *rawJSONWebEncryption) sanitized( + keyEncryptionAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { + if len(keyEncryptionAlgorithms) == 0 { + return nil, errors.New("go-jose/go-jose: no key algorithms provided") + } + if len(contentEncryption) == 0 { + return nil, errors.New("go-jose/go-jose: no content encryption algorithms provided") + } + obj := &JSONWebEncryption{ original: parsed, unprotected: parsed.Unprotected, @@ -184,10 +232,31 @@ } } - for _, recipient := range obj.recipients { + for i, recipient := range obj.recipients { headers := obj.mergedHeaders(&recipient) - if headers.getAlgorithm() == "" || headers.getEncryption() == "" { - return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers") + if headers.getAlgorithm() == "" { + return nil, fmt.Errorf(`go-jose/go-jose: recipient %d: missing header "alg"`, i) + } + if headers.getEncryption() == "" { + return nil, fmt.Errorf(`go-jose/go-jose: recipient %d: missing header "enc"`, i) + } + err := validateAlgEnc(headers, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: recipient %d: %s", i, err) + } + + } + + if obj.protected != nil { + err := validateAlgEnc(*obj.protected, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: protected header: %s", err) + } + } + if obj.unprotected != nil { + err := validateAlgEnc(*obj.unprotected, keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: unprotected header: %s", err) } } @@ -199,8 +268,26 @@ return obj, nil } -// parseEncryptedCompact parses a message in compact format. -func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { +func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption) error { + alg := headers.getAlgorithm() + enc := headers.getEncryption() + if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { + return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) + } + if enc != "" && !containsContentEncryption(contentEncryption, enc) { + return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) + } + return nil +} + +// ParseEncryptedCompact parses a message in JWE Compact Serialization. +// +// https://datatracker.ietf.org/doc/html/rfc7516#section-3.1 +func ParseEncryptedCompact( + input string, + keyAlgorithms []KeyAlgorithm, + contentEncryption []ContentEncryption, +) (*JSONWebEncryption, error) { parts := strings.Split(input, ".") if len(parts) != 5 { return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") @@ -239,7 +326,7 @@ Tag: newBuffer(tag), } - return raw.sanitized() + return raw.sanitized(keyAlgorithms, contentEncryption) } // CompactSerialize serializes an object using the compact serialization format.
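[Editor's note: a consequence of the `sanitized`/`validateAlgEnc` changes above is that parsing fails up front unless the caller enumerates every acceptable algorithm. A sketch of the calling side; the token literal is a placeholder, not a real JWE.]

```go
package main

import (
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

// parseWithAllowList accepts only RSA-OAEP key encryption and A256GCM content
// encryption; a token using any other "alg"/"enc" pair is rejected during
// parsing, before any key material is touched.
func parseWithAllowList(token string) (*jose.JSONWebEncryption, error) {
	return jose.ParseEncryptedCompact(token,
		[]jose.KeyAlgorithm{jose.RSA_OAEP},
		[]jose.ContentEncryption{jose.A256GCM},
	)
}

func main() {
	// Placeholder input: fails with a format error; a well-formed token with
	// a disallowed algorithm would instead fail with an error like
	// `unexpected key algorithm "A256KW"; expected ["RSA-OAEP"]`.
	if _, err := parseWithAllowList("eyJ..."); err != nil {
		log.Println(err)
	}
}
```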
@@ -251,13 +338,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go similarity index 91% rename from vendor/gopkg.in/go-jose/go-jose.v2/jwk.go rename to vendor/github.com/go-jose/go-jose/v4/jwk.go index ea3d8634..8a528421 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/rsa" "crypto/sha1" @@ -34,9 +35,7 @@ import ( "reflect" "strings" - "golang.org/x/crypto/ed25519" - - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing. @@ -63,14 +62,26 @@ type rawJSONWebKey struct { Qi *byteBuffer `json:"qi,omitempty"` // Certificates X5c []string `json:"x5c,omitempty"` - X5u *url.URL `json:"x5u,omitempty"` + X5u string `json:"x5u,omitempty"` X5tSHA1 string `json:"x5t,omitempty"` X5tSHA256 string `json:"x5t#S256,omitempty"` } -// JSONWebKey represents a public or private key in JWK format. +// JSONWebKey represents a public or private key in JWK format. It can be +// marshaled into JSON and unmarshaled from JSON. type JSONWebKey struct { - // Cryptographic key, can be a symmetric or asymmetric key. + // Key is the Go in-memory representation of this key. It must have one + // of these types: + // - ed25519.PublicKey + // - ed25519.PrivateKey + // - *ecdsa.PublicKey + // - *ecdsa.PrivateKey + // - *rsa.PublicKey + // - *rsa.PrivateKey + // - []byte (a symmetric key) + // + // When marshaling this JSONWebKey into JSON, the "kty" header parameter + // will be automatically set based on the type of this field. Key interface{} // Key identifier, parsed from `kid` header. 
KeyID string @@ -156,7 +167,9 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) { } } - raw.X5u = k.CertificatesURL + if k.CertificatesURL != nil { + raw.X5u = k.CertificatesURL.String() + } return json.Marshal(raw) } @@ -238,13 +251,18 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { if certPub != nil && keyPub != nil { if !reflect.DeepEqual(certPub, keyPub) { - return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields to not match") + return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match") } } *k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs} - k.CertificatesURL = raw.X5u + if raw.X5u != "" { + k.CertificatesURL, err = url.Parse(raw.X5u) + if err != nil { + return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err) + } + } // x5t parameters are base64url-encoded SHA thumbprints // See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8 @@ -383,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) + case OpaqueSigner: + return key.Public().Thumbprint(hash) default: return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } @@ -392,7 +412,7 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { } h := hash.New() - h.Write([]byte(input)) + _, _ = h.Write([]byte(input)) return h.Sum(nil), nil } @@ -740,7 +760,7 @@ func dSize(curve elliptic.Curve) int { bitLen := order.BitLen() size := bitLen / 8 if bitLen%8 != 0 { - size = size + 1 + size++ } return size } @@ -758,3 +778,46 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) { } return key.K.bytes(), nil } + +var ( + // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a + // key ID which matches one in the provided tokens headers. + ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { + var jwks JSONWebKeySet + + switch jwksType := key.(type) { + case *JSONWebKeySet: + jwks = *jwksType + case JSONWebKeySet: + jwks = jwksType + default: + // If the specified key is not a JWKS, return as is. + return key, nil + } + + // Determine the KID to search for from the headers. + var kid string + for _, header := range headers { + if header.KeyID != "" { + kid = header.KeyID + break + } + } + + // If no KID is specified in the headers, reject. + if kid == "" { + return nil, ErrJWKSKidNotFound + } + + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. + keys := jwks.Key(kid) + if len(keys) == 0 { + return nil, ErrJWKSKidNotFound + } + + return keys[0].Key, nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go similarity index 79% rename from vendor/gopkg.in/go-jose/go-jose.v2/jws.go rename to vendor/github.com/go-jose/go-jose/v4/jws.go index 1a24fa46..3a912301 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -23,7 +23,7 @@ import ( "fmt" "strings" - "gopkg.in/go-jose/go-jose.v2/json" + "github.com/go-jose/go-jose/v4/json" ) // rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing. 
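[Editor's note: the `tryJWKS` helper added above is what lets `Decrypt` and `DecryptMulti` accept a `JSONWebKeySet` directly: the member is selected by the token's `kid` header, and `ErrJWKSKidNotFound` is returned when nothing matches. A sketch of the lookup it performs, using the exported `Key` method; the kid values and symmetric key bytes are made up.]

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// A key set with a single member under a hypothetical kid. The Key field
	// would normally hold real key material (here, a 16-byte symmetric key).
	set := jose.JSONWebKeySet{
		Keys: []jose.JSONWebKey{
			{Key: []byte("0123456789abcdef"), KeyID: "kid-1"},
		},
	}

	// This mirrors what tryJWKS does internally with the header's kid.
	if keys := set.Key("kid-1"); len(keys) > 0 {
		fmt.Println("found key for kid-1")
	}
	if keys := set.Key("missing"); len(keys) == 0 {
		fmt.Println("no match: Decrypt would return ErrJWKSKidNotFound")
	}
}
```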
@@ -75,22 +75,41 @@ type Signature struct {
 	original *rawSignatureInfo
 }
-// ParseSigned parses a signed message in compact or full serialization format.
-func ParseSigned(signature string) (*JSONWebSignature, error) {
+// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization.
+//
+// https://datatracker.ietf.org/doc/html/rfc7515#section-7
+func ParseSigned(
+	signature string,
+	signatureAlgorithms []SignatureAlgorithm,
+) (*JSONWebSignature, error) {
 	signature = stripWhitespace(signature)
 	if strings.HasPrefix(signature, "{") {
-		return parseSignedFull(signature)
+		return ParseSignedJSON(signature, signatureAlgorithms)
 	}
-	return parseSignedCompact(signature, nil)
+	return parseSignedCompact(signature, nil, signatureAlgorithms)
+}
+
+// ParseSignedCompact parses a message in JWS Compact Serialization.
+//
+// https://datatracker.ietf.org/doc/html/rfc7515#section-7.1
+func ParseSignedCompact(
+	signature string,
+	signatureAlgorithms []SignatureAlgorithm,
+) (*JSONWebSignature, error) {
+	return parseSignedCompact(signature, nil, signatureAlgorithms)
 }
 // ParseDetached parses a signed message in compact serialization format with detached payload.
-func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) {
+func ParseDetached(
+	signature string,
+	payload []byte,
+	signatureAlgorithms []SignatureAlgorithm,
+) (*JSONWebSignature, error) {
 	if payload == nil {
 		return nil, errors.New("go-jose/go-jose: nil payload")
 	}
-	return parseSignedCompact(stripWhitespace(signature), payload)
+	return parseSignedCompact(stripWhitespace(signature), payload, signatureAlgorithms)
 }
 // Get a header value
@@ -137,19 +156,36 @@ func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) ([]byte, error) {
 	return authData.Bytes(), nil
 }
-// parseSignedFull parses a message in full format.
-func parseSignedFull(input string) (*JSONWebSignature, error) {
+// ParseSignedJSON parses a message in JWS JSON Serialization.
+//
+// https://datatracker.ietf.org/doc/html/rfc7515#section-7.2
+func ParseSignedJSON(
+	input string,
+	signatureAlgorithms []SignatureAlgorithm,
+) (*JSONWebSignature, error) {
 	var parsed rawJSONWebSignature
 	err := json.Unmarshal([]byte(input), &parsed)
 	if err != nil {
 		return nil, err
 	}
-	return parsed.sanitized()
+	return parsed.sanitized(signatureAlgorithms)
+}
+
+func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureAlgorithm) bool {
+	for _, algorithm := range haystack {
+		if algorithm == needle {
+			return true
+		}
+	}
+	return false
 }
 // sanitized produces a cleaned-up JWS object from the raw JSON.
-func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
+func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) {
+	if len(signatureAlgorithms) == 0 {
+		return nil, errors.New("go-jose/go-jose: no signature algorithms specified")
+	}
 	if parsed.Payload == nil {
 		return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message")
 	}
@@ -198,6 +234,12 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 			return nil, err
 		}
+		alg := SignatureAlgorithm(signature.Header.Algorithm)
+		if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
+			return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
+				alg, signatureAlgorithms)
+		}
+
 		if signature.header != nil {
 			signature.Unprotected, err = signature.header.sanitized()
 			if err != nil {
@@ -241,6 +283,12 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 			return nil, err
 		}
+		alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm)
+		if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
+			return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
+				alg, signatureAlgorithms)
+		}
+
 		if obj.Signatures[i].header != nil {
 			obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
 			if err != nil {
@@ -274,7 +322,11 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 }
 // parseSignedCompact parses a message in compact format.
-func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
+func parseSignedCompact(
+	input string,
+	payload []byte,
+	signatureAlgorithms []SignatureAlgorithm,
+) (*JSONWebSignature, error) {
 	parts := strings.Split(input, ".")
 	if len(parts) != 3 {
 		return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
@@ -306,7 +358,7 @@ func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
 		Protected: newBuffer(rawProtected),
 		Signature: newBuffer(signature),
 	}
-	return raw.sanitized()
+	return raw.sanitized(signatureAlgorithms)
 }
 func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
@@ -314,15 +366,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
 		return "", ErrNotSupported
 	}
-	serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
-	payload := ""
-	signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
+	serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+	var payload []byte
 	if !detached {
-		payload = base64.RawURLEncoding.EncodeToString(obj.payload)
+		payload = obj.payload
 	}
-	return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
+	return base64JoinWithDots(
+		serializedProtected,
+		payload,
+		obj.Signatures[0].Signature,
+	), nil
 }
 // CompactSerialize serializes an object using the compact serialization format.
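The parsing changes above are the main breaking API change in the v2-to-v4 move for this repository: every parse entry point now takes an explicit allow-list of signature algorithms, and sanitized() rejects both an empty list and any token whose "alg" header falls outside it. A minimal end-to-end sketch (key generation and the "hello" payload are illustrative, not from this patch):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("hello"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// The token is ES256, so parsing with only RS256 permitted fails up
	// front with "unexpected signature algorithm", before any key is used.
	if _, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.RS256}); err != nil {
		fmt.Println("rejected:", err)
	}

	// With ES256 in the allow-list, parsing and verification succeed.
	parsed, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.ES256})
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&priv.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %s\n", payload)
}
```

Note that callers migrating from v2 cannot simply pass an empty slice to keep the old behavior; the allow-list is mandatory by design.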
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go
similarity index 95%
rename from vendor/gopkg.in/go-jose/go-jose.v2/opaque.go
rename to vendor/github.com/go-jose/go-jose/v4/opaque.go
index fc3e8d2e..42942723 100644
--- a/vendor/gopkg.in/go-jose/go-jose.v2/opaque.go
+++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go
@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
 }
 // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
+//
+// Note: this cannot currently be implemented outside this package because of its
+// unexported method.
 type OpaqueKeyEncrypter interface {
 	// KeyID returns the kid
 	KeyID() string
@@ -121,7 +124,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
 	return oke.encrypter.encryptKey(cek, alg)
 }
-//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
+// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
 type OpaqueKeyDecrypter interface {
 	DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
 }
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/shared.go b/vendor/github.com/go-jose/go-jose/v4/shared.go
similarity index 93%
rename from vendor/gopkg.in/go-jose/go-jose.v2/shared.go
rename to vendor/github.com/go-jose/go-jose/v4/shared.go
index 0fa66a44..1ec33961 100644
--- a/vendor/gopkg.in/go-jose/go-jose.v2/shared.go
+++ b/vendor/github.com/go-jose/go-jose/v4/shared.go
@@ -23,7 +23,7 @@ import (
 	"errors"
 	"fmt"
-	"gopkg.in/go-jose/go-jose.v2/json"
+	"github.com/go-jose/go-jose/v4/json"
 )
 // KeyAlgorithm represents a key management algorithm.
@@ -71,6 +71,12 @@ var (
 	// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
 	// nonce header parameter was included in an unprotected header object.
 	ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header")
+
+	// ErrMissingX5cHeader indicates that the JWT header is missing x5c headers.
+	ErrMissingX5cHeader = errors.New("go-jose/go-jose: no x5c header present in message")
+
+	// ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found.
+	ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve")
 )
 // Key management algorithms
@@ -133,8 +139,8 @@ const (
 type HeaderKey string
 const (
-	HeaderType        HeaderKey = "typ" // string
-	HeaderContentType           = "cty" // string
+	HeaderType        = "typ" // string
+	HeaderContentType = "cty" // string
 	// These are set by go-jose and shouldn't need to be set by consumers of the
 	// library.
@@ -183,8 +189,13 @@ type Header struct {
 	// Unverified certificate chain parsed from x5c header.
 	certificates []*x509.Certificate
-	// Any headers not recognised above get unmarshalled
-	// from JSON in a generic manner and placed in this map.
+	// At parse time, each header parameter with a name other than "kid",
+	// "jwk", "alg", "nonce", or "x5c" will have its value passed to
+	// [json.Unmarshal] to unmarshal it into an interface value.
+	// The resulting value will be stored in this map, with the header
+	// parameter name as the key.
+	//
+	// [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal
 	ExtraHeaders map[HeaderKey]interface{}
 }
@@ -194,7 +205,7 @@ type Header struct {
 // not be validated with the given verify options.
 func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
 	if len(h.certificates) == 0 {
-		return nil, errors.New("go-jose/go-jose: no x5c header present in message")
+		return nil, ErrMissingX5cHeader
 	}
 	leaf := h.certificates[0]
@@ -452,8 +463,8 @@ func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
 	return out, nil
 }
-func (dst rawHeader) isSet(k HeaderKey) bool {
-	dvr := dst[k]
+func (parsed rawHeader) isSet(k HeaderKey) bool {
+	dvr := parsed[k]
 	if dvr == nil {
 		return false
 	}
@@ -472,17 +483,17 @@ func (dst rawHeader) isSet(k HeaderKey) bool {
 }
 // Merge headers from src into dst, giving precedence to headers from l.
-func (dst rawHeader) merge(src *rawHeader) {
+func (parsed rawHeader) merge(src *rawHeader) {
 	if src == nil {
 		return
 	}
 	for k, v := range *src {
-		if dst.isSet(k) {
+		if parsed.isSet(k) {
 			continue
 		}
-		dst[k] = v
+		parsed[k] = v
 	}
 }
@@ -496,7 +507,7 @@ func curveName(crv elliptic.Curve) (string, error) {
 	case elliptic.P521():
 		return "P-521", nil
 	default:
-		return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve")
+		return "", ErrUnsupportedEllipticCurve
 	}
 }
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go
similarity index 83%
rename from vendor/gopkg.in/go-jose/go-jose.v2/signing.go
rename to vendor/github.com/go-jose/go-jose/v4/signing.go
index 5195a1a9..3dec0112 100644
--- a/vendor/gopkg.in/go-jose/go-jose.v2/signing.go
+++ b/vendor/github.com/go-jose/go-jose/v4/signing.go
@@ -19,14 +19,13 @@ package jose
 import (
 	"bytes"
 	"crypto/ecdsa"
+	"crypto/ed25519"
 	"crypto/rsa"
 	"encoding/base64"
 	"errors"
 	"fmt"
-	"golang.org/x/crypto/ed25519"
-
-	"gopkg.in/go-jose/go-jose.v2/json"
+	"github.com/go-jose/go-jose/v4/json"
 )
 // NonceSource represents a source of random nonces to go into JWS objects
@@ -41,6 +40,20 @@ type Signer interface {
 }
 // SigningKey represents an algorithm/key used to sign a message.
+//
+// Key must have one of these types:
+//  - ed25519.PrivateKey
+//  - *ecdsa.PrivateKey
+//  - *rsa.PrivateKey
+//  - *JSONWebKey
+//  - JSONWebKey
+//  - []byte (an HMAC key)
+//  - Any type that satisfies the OpaqueSigner interface
+//
+// If the key is an HMAC key, it must have at least as many bytes as the relevant hash output:
+//  - HS256: 32 bytes
+//  - HS384: 48 bytes
+//  - HS512: 64 bytes
 type SigningKey struct {
 	Algorithm SignatureAlgorithm
 	Key       interface{}
@@ -53,12 +66,22 @@ type SignerOptions struct {
 	// Optional map of additional keys to be inserted into the protected header
 	// of a JWS object. Some specifications which make use of JWS like to insert
-	// additional values here. All values must be JSON-serializable.
+	// additional values here.
+	//
+	// Values will be serialized by [json.Marshal] and must be valid inputs to
+	// that function.
+	//
+	// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
 	ExtraHeaders map[HeaderKey]interface{}
 }
 // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
-// if necessary. It returns itself and so can be used in a fluent style.
+// if necessary, and returns the updated SignerOptions.
+//
+// The v argument will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
 func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
 	if so.ExtraHeaders == nil {
 		so.ExtraHeaders = map[HeaderKey]interface{}{}
 	}
@@ -174,11 +197,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
 		return newVerifier(verificationKey.Key)
 	case *JSONWebKey:
 		return newVerifier(verificationKey.Key)
+	case OpaqueVerifier:
+		return &opaqueVerifier{verifier: verificationKey}, nil
+	default:
+		return nil, ErrUnsupportedKeyType
 	}
-	if ov, ok := verificationKey.(OpaqueVerifier); ok {
-		return &opaqueVerifier{verifier: ov}, nil
-	}
-	return nil, ErrUnsupportedKeyType
 }
 func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
@@ -205,11 +228,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
 		return newJWKSigner(alg, signingKey)
 	case *JSONWebKey:
 		return newJWKSigner(alg, *signingKey)
+	case OpaqueSigner:
+		return newOpaqueSigner(alg, signingKey)
+	default:
+		return recipientSigInfo{}, ErrUnsupportedKeyType
 	}
-	if signer, ok := signingKey.(OpaqueSigner); ok {
-		return newOpaqueSigner(alg, signer)
-	}
-	return recipientSigInfo{}, ErrUnsupportedKeyType
 }
 func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
@@ -322,12 +345,28 @@ func (ctx *genericSigner) Options() SignerOptions {
 }
 // Verify validates the signature on the object and returns the payload.
-// This function does not support multi-signature, if you desire multi-sig
+// This function does not support multi-signature. If you desire multi-signature
 // verification use VerifyMulti instead.
 //
 // Be careful when verifying signatures based on embedded JWKs inside the
 // payload header. You cannot assume that the key received in a payload is
 // trusted.
+//
+// The verificationKey argument must have one of these types:
+//  - ed25519.PublicKey
+//  - *ecdsa.PublicKey
+//  - *rsa.PublicKey
+//  - *JSONWebKey
+//  - JSONWebKey
+//  - *JSONWebKeySet
+//  - JSONWebKeySet
+//  - []byte (an HMAC key)
+//  - Any type that implements the OpaqueVerifier interface.
+//
+// If the key is an HMAC key, it must have at least as many bytes as the relevant hash output:
+//  - HS256: 32 bytes
+//  - HS384: 48 bytes
+//  - HS512: 64 bytes
 func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
 	err := obj.DetachedVerify(obj.payload, verificationKey)
 	if err != nil {
@@ -347,8 +386,15 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
 // most cases, you will probably want to use Verify instead. DetachedVerify
 // is only useful if you have a payload and signature that are separated from
 // each other.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
 func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
-	verifier, err := newVerifier(verificationKey)
+	key, err := tryJWKS(verificationKey, obj.headers()...)
+	if err != nil {
+		return err
+	}
+	verifier, err := newVerifier(key)
 	if err != nil {
 		return err
 	}
@@ -388,6 +434,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
 // returns the index of the signature that was verified, along with the signature
 // object and the payload. We return the signature and index to guarantee that
 // callers are getting the verified value.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
 func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
 	idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
 	if err != nil {
@@ -405,8 +454,15 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
 // DetachedVerifyMulti is only useful if you have a payload and signature that are
 // separated from each other, and the signature can have multiple signers at the
 // same time.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
 func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
-	verifier, err := newVerifier(verificationKey)
+	key, err := tryJWKS(verificationKey, obj.headers()...)
+	if err != nil {
+		return -1, Signature{}, err
+	}
+	verifier, err := newVerifier(key)
 	if err != nil {
 		return -1, Signature{}, err
 	}
@@ -439,3 +495,11 @@ outer:
 	return -1, Signature{}, ErrCryptoFailure
 }
+
+func (obj JSONWebSignature) headers() []Header {
+	headers := make([]Header, len(obj.Signatures))
+	for i, sig := range obj.Signatures {
+		headers[i] = sig.Header
+	}
+	return headers
+}
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
similarity index 89%
rename from vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go
rename to vendor/github.com/go-jose/go-jose/v4/symmetric.go
index 52c8b62b..a69103b0 100644
--- a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go
+++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
@@ -31,20 +31,26 @@ import (
 	"io"
 	"golang.org/x/crypto/pbkdf2"
-	"gopkg.in/go-jose/go-jose.v2/cipher"
+
+	josecipher "github.com/go-jose/go-jose/v4/cipher"
 )
-// Random reader (stubbed out in tests)
+// RandReader is a cryptographically secure random number generator (stubbed out in tests).
 var RandReader = rand.Reader
 const (
 	// RFC7518 recommends a minimum of 1,000 iterations:
-	// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+	// - https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+	//
 	// NIST recommends a minimum of 10,000:
-	// https://pages.nist.gov/800-63-3/sp800-63b.html
-	// 1Password uses 100,000:
-	// https://support.1password.com/pbkdf2/
-	defaultP2C = 100000
+	// - https://pages.nist.gov/800-63-3/sp800-63b.html
+	//
+	// 1Password increased in 2023 from 100,000 to 650,000:
+	// - https://support.1password.com/pbkdf2/
+	//
+	// OWASP recommended 600,000 in Dec 2022:
+	// - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
+	defaultP2C = 600000
 	// Default salt size: 128 bits
 	defaultP2SSize = 16
 )
@@ -278,8 +284,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
 	}
 	header := &rawHeader{}
-	header.set(headerIV, newBuffer(parts.iv))
-	header.set(headerTag, newBuffer(parts.tag))
+
+	if err = header.set(headerIV, newBuffer(parts.iv)); err != nil {
+		return recipientInfo{}, err
+	}
+
+	if err = header.set(headerTag, newBuffer(parts.tag)); err != nil {
+		return recipientInfo{}, err
+	}
 	return recipientInfo{
 		header: header,
@@ -332,8 +344,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
 	}
 	header := &rawHeader{}
-	header.set(headerP2C, ctx.p2c)
-	header.set(headerP2S, newBuffer(ctx.p2s))
+
+	if err = header.set(headerP2C, ctx.p2c); err != nil {
+		return recipientInfo{}, err
+	}
+
+	if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil {
+		return recipientInfo{}, err
+	}
 	return recipientInfo{
 		encryptedKey: jek,
@@ -436,7 +454,7 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
 func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
 	mac, err := ctx.hmac(payload, alg)
 	if err != nil {
-		return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac")
+		return Signature{}, err
 	}
 	return Signature{
@@ -468,12 +486,24 @@ func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
 func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
 	var hash func() hash.Hash
+	// https://datatracker.ietf.org/doc/html/rfc7518#section-3.2
+	// A key of the same size as the hash output (for instance, 256 bits for
+	// "HS256") or larger MUST be used
 	switch alg {
 	case HS256:
+		if len(ctx.key)*8 < 256 {
+			return nil, ErrInvalidKeySize
+		}
 		hash = sha256.New
 	case HS384:
+		if len(ctx.key)*8 < 384 {
+			return nil, ErrInvalidKeySize
+		}
 		hash = sha512.New384
 	case HS512:
+		if len(ctx.key)*8 < 512 {
+			return nil, ErrInvalidKeySize
+		}
 		hash = sha512.New
 	default:
 		return nil, ErrUnsupportedAlgorithm
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index 8969526a..7c7f0c69 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,7 @@
 # A minimal logging API for Go
 [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
 [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
 logr offers an(other) opinion on how Go programs and libraries can do logging
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index fb2f866f..30568e76 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
 // implementation. It should be constructed with NewFormatter. Some of
 // its methods directly implement logr.LogSink.
 type Formatter struct {
-	outputFormat    outputFormat
-	prefix          string
-	values          []any
-	valuesStr       string
-	parentValuesStr string
-	depth           int
-	opts            *Options
-	group           string // for slog groups
-	groupDepth      int
+	outputFormat outputFormat
+	prefix       string
+	values       []any
+	valuesStr    string
+	depth        int
+	opts         *Options
+	groupName    string // for slog groups
+	groups       []groupDef
 }
 // outputFormat indicates which outputFormat to use.
@@ -257,6 +256,13 @@ const (
 	outputJSON
 )
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+	name   string
+	values string
+}
+
 // PseudoStruct is a list of key-value pairs that gets logged as a struct.
 type PseudoStruct []any
@@ -264,76 +270,102 @@ type PseudoStruct []any
 func (f Formatter) render(builtins, args []any) string {
 	// Empirically bytes.Buffer is faster than strings.Builder for this.
 	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
 	if f.outputFormat == outputJSON {
-		buf.WriteByte('{') // for the whole line
+		buf.WriteByte('{') // for the whole record
 	}
+
+	// Render builtins
 	vals := builtins
 	if hook := f.opts.RenderBuiltinsHook; hook != nil {
 		vals = hook(f.sanitize(vals))
 	}
-	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+	f.flatten(buf, vals, false) // keys are ours, no need to escape
 	continuing := len(builtins) > 0
-	if f.parentValuesStr != "" {
-		if continuing {
-			buf.WriteByte(f.comma())
+	// Turn the inner-most group into a string
+	argsStr := func() string {
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+		vals = args
+		if hook := f.opts.RenderArgsHook; hook != nil {
+			vals = hook(f.sanitize(vals))
 		}
-		buf.WriteString(f.parentValuesStr)
-		continuing = true
-	}
+		f.flatten(buf, vals, true) // escape user-provided keys
-	groupDepth := f.groupDepth
-	if f.group != "" {
-		if f.valuesStr != "" || len(args) != 0 {
-			if continuing {
-				buf.WriteByte(f.comma())
-			}
-			buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
-			buf.WriteByte(f.colon())
-			buf.WriteByte('{') // for the group
-			continuing = false
-		} else {
-			// The group was empty
-			groupDepth--
+		return buf.String()
+	}()
+
+	// Render the stack of groups from the inside out.
+	bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+	for i := len(f.groups) - 1; i >= 0; i-- {
+		grp := &f.groups[i]
+		if grp.values == "" && bodyStr == "" {
+			// no contents, so we must elide the whole group
+			continue
 		}
+		bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
 	}
-	if f.valuesStr != "" {
+	if bodyStr != "" {
 		if continuing {
 			buf.WriteByte(f.comma())
 		}
-		buf.WriteString(f.valuesStr)
-		continuing = true
+		buf.WriteString(bodyStr)
 	}
-	vals = args
-	if hook := f.opts.RenderArgsHook; hook != nil {
-		vals = hook(f.sanitize(vals))
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('}') // for the whole record
 	}
-	f.flatten(buf, vals, continuing, true) // escape user-provided keys
-	for i := 0; i < groupDepth; i++ {
-		buf.WriteByte('}') // for the groups
+	return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+	needClosingBrace := false
+	if name != "" && (values != "" || args != "") {
+		buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+		buf.WriteByte(f.colon())
+		buf.WriteByte('{')
+		needClosingBrace = true
 	}
-	if f.outputFormat == outputJSON {
-		buf.WriteByte('}') // for the whole line
+	continuing := false
+	if values != "" {
+		buf.WriteString(values)
+		continuing = true
+	}
+
+	if args != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(args)
+	}
+
+	if needClosingBrace {
+		buf.WriteByte('}')
 	}
 	return buf.String()
 }
-// flatten renders a list of key-value pairs into a buffer. If continuing is
-// true, it assumes that the buffer has previous values and will emit a
-// separator (which depends on the output format) before the first pair it
-// writes. If escapeKeys is true, the keys are assumed to have
-// non-JSON-compatible characters in them and must be evaluated for escapes.
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
 //
 // This function returns a potentially modified version of kvList, which
 // ensures that there is a value for every key (adding a value if needed) and
 // that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
 	// This logic overlaps with sanitize() but saves one type-cast per key,
 	// which can be measurable.
 	if len(kvList)%2 != 0 {
@@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
 		}
 		v := kvList[i+1]
-		if i > 0 || continuing {
+		if i > 0 {
 			if f.outputFormat == outputJSON {
 				buf.WriteByte(f.comma())
 			} else {
@@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
 // startGroup opens a new group scope (basically a sub-struct), which locks all
 // the current saved values and starts them anew. This is needed to satisfy
 // slog.
-func (f *Formatter) startGroup(group string) {
+func (f *Formatter) startGroup(name string) {
 	// Unnamed groups are just inlined.
-	if group == "" {
+	if name == "" {
 		return
 	}
-	// Any saved values can no longer be changed.
-	buf := bytes.NewBuffer(make([]byte, 0, 1024))
-	continuing := false
-
-	if f.parentValuesStr != "" {
-		buf.WriteString(f.parentValuesStr)
-		continuing = true
-	}
-
-	if f.group != "" && f.valuesStr != "" {
-		if continuing {
-			buf.WriteByte(f.comma())
-		}
-		buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
-		buf.WriteByte(f.colon())
-		buf.WriteByte('{') // for the group
-		continuing = false
-	}
-
-	if f.valuesStr != "" {
-		if continuing {
-			buf.WriteByte(f.comma())
-		}
-		buf.WriteString(f.valuesStr)
-	}
-
-	// NOTE: We don't close the scope here - that's done later, when a log line
-	// is actually rendered (because we have N scopes to close).
-
-	f.parentValuesStr = buf.String()
+	n := len(f.groups)
+	f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
 	// Start collecting new values.
-	f.group = group
-	f.groupDepth++
+	f.groupName = name
 	f.valuesStr = ""
 	f.values = nil
 }
@@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
 	// Pre-render values, so we don't have to do it on each Info/Error call.
 	buf := bytes.NewBuffer(make([]byte, 0, 1024))
-	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.flatten(buf, vals, true) // escape user-provided keys
 	f.valuesStr = buf.String()
 }
diff --git a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
index 6cd9695e..405740a1 100644
--- a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
+++ b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
@@ -18,5 +18,11 @@ linters-settings:
 issues:
   exclude-use-default: false
-  exclude:
-    - "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*printf?|os\\.(Un)?Setenv). is not checked"
+  exclude-rules:
+    # The following grpc linters are excluded because grpc.Dial, grpc.DialContext and grpc.WithBlock will be supported throughout 1.x.
+    - linters: [staticcheck]
+      text: 'SA1019: grpc.Dial is deprecated: use NewClient instead'
+    - linters: [staticcheck]
+      text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead'
+    - linters: [staticcheck]
+      text: 'SA1019: grpc.WithBlock is deprecated: this DialOption is not supported by NewClient'
diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS
index 942cec9b..ad514665 100644
--- a/vendor/github.com/google/certificate-transparency-go/AUTHORS
+++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -9,8 +9,8 @@
 # Please keep the list sorted.
 Alex Cohn
-Comodo CA Limited
 Ed Maste
+Elisha Silas
 Fiaz Hossain
 Google LLC
 Internet Security Research Group
@@ -23,6 +23,7 @@ Nicholas Galbreath
 Oliver Weidner
 PrimeKey Solutions AB
 Ruslan Kovalov
+Sectigo Limited
 Venafi, Inc.
 Vladimir Rutsky
 Ximin Luo
diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
index e286ae20..5cb7b7d4 100644
--- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -2,6 +2,323 @@
 ## HEAD
+## v1.3.1
+
+* Add AllLogListSignatureURL by @AlexLaroche in https://github.com/google/certificate-transparency-go/pull/1634
+* Add TiledLogs to log list JSON by @mcpherrinm in https://github.com/google/certificate-transparency-go/pull/1635
+* chore: relax go directive to permit 1.22.x by @dnwe in https://github.com/google/certificate-transparency-go/pull/1640
+
+### Dependency Update
+
+* Bump github.com/fullstorydev/grpcurl from 1.9.1 to 1.9.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1627
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1628
+* Bump the docker-deps group across 5 directories with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1630
+* Bump github/codeql-action from 3.27.5 to 3.27.6 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1629
+* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1631
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1633
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1632
+* Bump the docker-deps group across 4 directories with 1 update by @dependabot in https://github.com/google/certificate-transparency-go/pull/1638
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1637
+* Bump the all-deps group across 1 directory with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1641
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1643
+* Bump google.golang.org/grpc from 1.69.2 to 1.69.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1642
+
+## v1.3.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature now supports PostgreSQL, in addition to the support for MySQL/MariaDB that was added in [v1.2.0](#v1.2.0).
+
+Log operators can choose to enable this feature for new PostgreSQL-based CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/postgresql/schema.sql). The other available options are documented in the [v1.2.0](#v1.2.0) changelog entry.
+
+This change is tested in Cloud Build tests using the `postgres:17` Docker image as of the time of writing.
+
+* Add IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1618
+
+### Misc
+
+* [Dependabot] Update all docker images in one PR by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1614
+* Explicitly include version tag by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1617
+* Add empty cloudbuild_postgresql.yaml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1623
+
+### Dependency update
+
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1609
+* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1611
+* Bump github/codeql-action from 3.27.0 to 3.27.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1610
+* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1612
+* Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1613
+* Bump the docker-deps group across 3 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1616
+* Bump github/codeql-action from 3.27.1 to 3.27.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1615
+* Bump the docker-deps group across 4 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1622
+* Bump github/codeql-action from 3.27.2 to 3.27.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1620
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1621
+* Bump github.com/google/trillian from 1.6.1 to 1.7.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1624
+* Bump github/codeql-action from 3.27.4 to 3.27.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1625
+
+## v1.2.2
+
+* Recommended Go version for development: 1.22
+  * Using a different version can lead to presubmits failing due to unexpected diffs.
+
+### Add TLS Support
+
+Add TLS support for Trillian: by using the `--trillian_tls_ca_cert_file` flag, users can provide a CA certificate that is used to establish secure communication with the Trillian log server.
+
+Add TLS support for ct_server: by using the `--tls_certificate` and `--tls_key` flags, users can provide a service certificate and key that enable the server to handle HTTPS requests.
+
+* Add TLS support for CTLog server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1523
+* Add TLS support for migrillian by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1525
+* fix TLS configuration for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1542
+* Add Trillian TLS support for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1551
+
+### HTTP Idle Connection Timeout Flag
+
+A new flag `http_idle_timeout` is added to set the HTTP server's idle timeout value in the ct_server binary. This controls the maximum amount of time to wait for the next request when keep-alives are enabled.
+
+* add flag for HTTP idle connection timeout value by @bobcallaway in https://github.com/google/certificate-transparency-go/pull/1597
+
+### Misc
+
+* Refactor issuance chain service by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1512
+* Use the version in the go.mod file for vuln checks by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1528
+
+### Fixes
+
+* Fix failed tests on 32-bit OS by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1540
+
+### Dependency update
+
+* Bump go.etcd.io/etcd/v3 from 3.5.13 to 3.5.14 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1500
+* Bump github/codeql-action from 3.25.6 to 3.25.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1501
+* Bump golang.org/x/net from 0.25.0 to 0.26.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1503
+* Group dependabot updates as much as possible by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1506
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1507
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1511
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1510
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1509
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1508
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1516
+* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1518
+* Bump alpine from 3.19 to 3.20 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1492
+* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1517
+* Bump golang from `aec4784` to `9678844` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1513
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1515
+* Bump golang from `aec4784` to `9678844` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1514
+* Bump alpine from `77726ef` to `b89d9c9` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1519
+* Bump k8s.io/klog/v2 from 2.130.0 to 2.130.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1521
+* Bump alpine from `77726ef` to `b89d9c9` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1520
+* Bump github/codeql-action from 3.25.10 to 3.25.11 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1526
+* Bump version of go used by the vuln checker by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1527
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1530
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1531
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1532
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1533
+* Bump actions/upload-artifact from 4.3.3 to 4.3.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1534
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1535
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1536
+* Bump github/codeql-action from 3.25.12 to 3.25.13 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1538
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1537
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1543
+* Bump golang from `6c27802` to `af9b40f` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1544
+* Bump golang from `6c27802` to `af9b40f` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1548
+* Bump golang from `6c27802` to `af9b40f` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1547
+* Bump alpine from `b89d9c9` to `0a4eaa0` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1546
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1545
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1549
+* Bump golang.org/x/time from 0.5.0 to 0.6.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1550
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1552
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1553
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1554
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1555
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1556
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1557
+* Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1559
+* Bump github/codeql-action from 3.26.0 to 3.26.3 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1561
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1558
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1563
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1560
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1562
+* Bump go version to 1.22.6 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1564
+* Bump github.com/prometheus/client_golang from 1.20.0 to 1.20.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1565
+* Bump github/codeql-action from 3.26.3 to 3.26.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1566
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1568
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1569
+* Bump go from 1.22.6 to 1.22.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1574
+* Bump alpine from `0a4eaa0` to `beefdbd` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1571
+* Bump the all-deps group across 1 directory with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1577
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1575
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1576
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1572
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1573
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1578
+* Bump github/codeql-action from 3.26.6 to 3.26.7 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1579
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1580
+* Bump github/codeql-action from 3.26.7 to 3.26.8 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1581
+* Bump distroless/base-debian12 from `c925d12` to `88e0a2a` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1582
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1585
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1583
+* Bump golang from `1a5326b` to `dba79eb` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1584
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1587
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1586
+* Bump the all-deps group with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1588
+* Bump the all-deps group with 6 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1589
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1593
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1592
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1591
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1590
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1595
+* Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1598
+* Bump golang from `18d2f94` to `2341ddf` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1602
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1599
+* Bump golang from `18d2f94` to `2341ddf` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1600
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1601
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1603
+* Bump distroless/base-debian12 from `6ae5fe6` to `8fe31fb` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1604
+
+## v1.2.1
+
+### Fixes
+
+* Fix Go potential bugs and maintainability by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1496
+
+### Dependency update
+
+* Bump google.golang.org/grpc from 1.63.2 to 1.64.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1482
+
+## v1.2.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature reduces CT/Trillian database storage by deduplicating the entire issuance chain (intermediate certificate(s) and root certificate) that is currently stored in the Trillian merkle tree leaf ExtraData field. Storage cost should be reduced by at least 33% for new CT logs with this feature enabled. Currently only MySQL/MariaDB is supported to store the issuance chain in the CTFE database.
+
+Existing logs are not affected by this change.
+
+Log operators can choose to opt in to this change for new CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/mysql/schema.sql). See [example](trillian/examples/deployment/docker/ctfe/ct_server.cfg).
+
+- `ctfe_storage_connection_string`
+- `extra_data_issuance_chain_storage_backend`
+
+An optional LRU cache can be enabled by providing the following flags.
+
+- `cache_type`
+- `cache_size`
+- `cache_ttl`
+
+This change is tested in Cloud Build tests using the `mysql:8.4` Docker image as of the time of writing.
+
+* Add issuance chain storage interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1430
+* Add issuance chain cache interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1431
+* Add CTFE extra data storage saving configs to config.proto by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1432
+* Add new types `PrecertChainEntryHash` and `CertificateChainHash` for TLS marshal/unmarshal in storage saving by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1435
+* Add IssuanceChainCache LRU implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1454
+* Add issuance chain service by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1452
+* Add CTFE extra data storage saving configs validation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1456
+* Add IssuanceChainStorage MySQL implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1462
+* Fix errcheck lint in mysql test by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1464
+* CTFE Extra Data Issuance Chain Deduplication by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1477
+* Fix incorrect deployment doc and server config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1494
+
+### Submission proxy: Root compatibility checking
+
+* Adds the ability for a CT client to disable root compatible checking by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1258
+
+### Fixes
+
+* Return 429 Too Many Requests for gRPC error code `ResourceExhausted` from Trillian by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1401
+* Safeguard against redirects on PUT request by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1418
+* Fix CT client upload to be safe against no-op POSTs by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1424
+
+### Misc
+
+* Prefix errors.New variables with the word "Err" by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1399
+* Remove lint exceptions and fix remaining issues by @silaselisha in https://github.com/google/certificate-transparency-go/pull/1438
+* Fix invalid Go toolchain version by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1471
+* Regenerate proto files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1489
+
+### Dependency update
+
+* Bump distroless/base-debian12 from `5eae9ef` to `28a7f1f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1388
+* Bump github/codeql-action from 3.24.6 to 3.24.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1389
+* Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1390
+* Bump golang from `6699d28` to `7f9c058` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1391
+* Bump golang from `6699d28` to `7f9c058` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1392
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1393
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1394
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1395
+* Bump golang from `7f9c058` to `d996c64` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1396
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1397
+* Bump golang from `7f9c058` to `d996c64` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1398
+* Bump github/codeql-action from 3.24.7 to 3.24.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1400
+* Bump github/codeql-action from 3.24.8 to 3.24.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1402
+* Bump go.etcd.io/etcd/v3 from 3.5.12 to 3.5.13 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1405
+* Bump distroless/base-debian12 from `28a7f1f` to `611d30d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1406
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1407
+* Bump golang.org/x/net from 0.22.0 to 0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1408
+* update govulncheck go version from 1.21.8 to 1.21.9 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1412
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1409
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1410
+* Bump golang.org/x/crypto from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1414
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1411
+* Bump github/codeql-action from 3.24.9 to 3.24.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1415
+* Bump golang.org/x/net from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1416
+* Bump google.golang.org/grpc from 1.62.1 to 1.63.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1417
+* Bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1419
+* Bump golang from `48b942a` to `3451eec` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1421
+* Bump golang from `48b942a` to `3451eec` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1423
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1420
+* Bump golang from `3451eec` to `b03f3ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1426
+* Bump golang from `3451eec` to `b03f3ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1425
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1422
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1427
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1428
+* Bump github/codeql-action from 3.24.10 to 3.25.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1433
+* Bump github/codeql-action from 3.25.0 to 3.25.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1434
+* Bump actions/upload-artifact from 4.3.1 to 4.3.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1436
+* Bump actions/checkout from 4.1.2 to 4.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1437
+* Bump actions/upload-artifact from 4.3.2 to 4.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1440
+* Bump github/codeql-action from 3.25.1 to 3.25.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1441
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1444
+* Bump golang from `b03f3ba` to `d0902ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1443
+* Bump github.com/rs/cors from 1.10.1 to 1.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1442
+* Bump golang from `b03f3ba` to `d0902ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1447
+* Bump actions/checkout from 4.1.3 to 4.1.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1446
+* Bump github/codeql-action from 3.25.2 to 3.25.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1449
+* Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1448
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1445
+* Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1451
+* Bump distroless/base-debian12 from `611d30d` to `d8d01e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1450
+* Bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1453
+* Bump actions/setup-go from 5.0.0 to 5.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1455
+* Bump golang.org/x/net from 0.24.0 to 0.25.0 and golang.org/x/crypto from v0.22.0 to v0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1457
+* Bump google.golang.org/protobuf from 1.34.0 to 1.34.1 by @dependabot in
https://github.com/google/certificate-transparency-go/pull/1458 +* Bump distroless/base-debian12 from `d8d01e2` to `786007f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1461 +* Bump golangci/golangci-lint-action from 5.1.0 to 5.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1460 +* Bump `go-version-input` to 1.21.10 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1472 +* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1473 +* Bump actions/checkout from 4.1.4 to 4.1.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1469 +* Bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1465 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1466 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1463 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1470 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1467 +* Bump github/codeql-action from 3.25.3 to 3.25.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1474 +* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1475 +* Bump ossf/scorecard-action from 2.3.1 to 2.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1476 +* Bump github/codeql-action from 3.25.4 to 3.25.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1478 +* Bump golang from `6d71b7c` to `ef27a3c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1480 +* Bump golang from `6d71b7c` to `ef27a3c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1481 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1479 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1483 +* Bump golang from `ef27a3c` to `5c56bd4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1484 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1485 +* Bump golang from `ef27a3c` to `5c56bd4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1486 +* Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1487 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1488 +* Bump 
github/codeql-action from 3.25.5 to 3.25.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1490 +* Bump alpine from `c5b1261` to `58d02b4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1491 +* Bump alpine from `58d02b4` to `77726ef` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1493 + ## v1.1.8 * Recommended Go version for development: 1.21 diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS index 4d04dc6a..3a98a7e1 100644 --- a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS +++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS @@ -30,6 +30,7 @@ Chris Kennelly David Drysdale Deyan Bektchiev Ed Maste +Elisha Silas Emilia Kasper Eran Messeri Fiaz Hossain @@ -51,7 +52,7 @@ Paul Lietar Pavel Kalinnikov Pierre Phaneuf Rob Percival -Rob Stradling +Rob Stradling Roger Ng Roland Shoemaker Ruslan Kovalov diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md index ac71ebc1..b528c557 100644 --- a/vendor/github.com/google/certificate-transparency-go/README.md +++ b/vendor/github.com/google/certificate-transparency-go/README.md @@ -6,7 +6,7 @@ This repository holds Go code related to [Certificate Transparency](https://www.certificate-transparency.org/) (CT). The -repository requires Go version 1.21. +repository requires Go version 1.22. - [Repository Structure](#repository-structure) - [Trillian CT Personality](#trillian-ct-personality) diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go new file mode 100644 index 00000000..a2fed51d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -0,0 +1,278 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v3.20.1 +// source: client/configpb/multilog.proto + +package configpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. 
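+//
+// A minimal sketch of a matching text-protobuf config with two contiguous
+// shards (URIs and timestamps are illustrative only; 1704067200 is
+// 2024-01-01T00:00:00Z, so the first shard's limit equals the second
+// shard's start):
+//
+//	shard {
+//	  uri: "https://ct.example.com/2023"
+//	  not_after_limit { seconds: 1704067200 }
+//	}
+//	shard {
+//	  uri: "https://ct.example.com/2024"
+//	  not_after_start { seconds: 1704067200 }
+//	}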
+type TemporalLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *TemporalLogConfig) Reset() { + *x = TemporalLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemporalLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemporalLogConfig) ProtoMessage() {} + +func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead. +func (*TemporalLogConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0} +} + +func (x *TemporalLogConfig) GetShard() []*LogShardConfig { + if x != nil { + return x.Shard + } + return nil +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +type LogShardConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The log's public key in DER-encoded PKIX form. + PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"` + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` +} + +func (x *LogShardConfig) Reset() { + *x = LogShardConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogShardConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogShardConfig) ProtoMessage() {} + +func (x *LogShardConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead. 
+func (*LogShardConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1} +} + +func (x *LogShardConfig) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *LogShardConfig) GetPublicKeyDer() []byte { + if x != nil { + return x.PublicKeyDer + } + return nil +} + +func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterStart + } + return nil +} + +func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterLimit + } + return nil +} + +var File_client_configpb_multilog_proto protoreflect.FileDescriptor + +var file_client_configpb_multilog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, + 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e, + 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_configpb_multilog_proto_rawDescOnce sync.Once + file_client_configpb_multilog_proto_rawDescData = 
file_client_configpb_multilog_proto_rawDesc +) + +func file_client_configpb_multilog_proto_rawDescGZIP() []byte { + file_client_configpb_multilog_proto_rawDescOnce.Do(func() { + file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData) + }) + return file_client_configpb_multilog_proto_rawDescData +} + +var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_client_configpb_multilog_proto_goTypes = []interface{}{ + (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig + (*LogShardConfig)(nil), // 1: configpb.LogShardConfig + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_client_configpb_multilog_proto_depIdxs = []int32{ + 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig + 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp + 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_client_configpb_multilog_proto_init() } +func file_client_configpb_multilog_proto_init() { + if File_client_configpb_multilog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemporalLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogShardConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_configpb_multilog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_configpb_multilog_proto_goTypes, + DependencyIndexes: file_client_configpb_multilog_proto_depIdxs, + MessageInfos: file_client_configpb_multilog_proto_msgTypes, + }.Build() + File_client_configpb_multilog_proto = out.File + file_client_configpb_multilog_proto_rawDesc = nil + file_client_configpb_multilog_proto_goTypes = nil + file_client_configpb_multilog_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto new file mode 100644 index 00000000..0774c35e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package configpb; + +option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb"; + +import "google/protobuf/timestamp.proto"; + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +message TemporalLogConfig { + repeated LogShardConfig shard = 1; +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +message LogShardConfig { + string uri = 1; + + // The log's public key in DER-encoded PKIX form. + bytes public_key_der = 2; + + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + google.protobuf.Timestamp not_after_start = 3; + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + google.protobuf.Timestamp not_after_limit = 4; +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go new file mode 100644 index 00000000..103dc815 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go @@ -0,0 +1,68 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "errors" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done. +func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + + params := map[string]string{ + "start": strconv.FormatInt(start, 10), + "end": strconv.FormatInt(end, 10), + } + + var resp ct.GetEntriesResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server +// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures. 
+// However, this does mean that any certificate parsing failures will cause a failure of the whole
+// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke
+// ct.LogEntryFromLeaf() on each individual entry.
+func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) {
+	resp, err := c.GetRawEntries(ctx, start, end)
+	if err != nil {
+		return nil, err
+	}
+	entries := make([]ct.LogEntry, len(resp.Entries))
+	for i, entry := range resp.Entries {
+		index := start + int64(i)
+		logEntry, err := ct.LogEntryFromLeaf(index, &entry)
+		if x509.IsFatal(err) {
+			return nil, err
+		}
+		entries[i] = *logEntry
+	}
+	return entries, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
new file mode 100644
index 00000000..7842c8e2
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
@@ -0,0 +1,226 @@
+// Copyright 2014 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package client is a CT log client implementation and contains types and code
+// for interacting with RFC6962-compliant CT Log instances.
+// See http://tools.ietf.org/html/rfc6962 for details.
+package client
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	ct "github.com/google/certificate-transparency-go"
+	"github.com/google/certificate-transparency-go/jsonclient"
+	"github.com/google/certificate-transparency-go/tls"
+)
+
+// LogClient represents a client for a given CT Log instance.
+type LogClient struct {
+	jsonclient.JSONClient
+}
+
+// CheckLogClient is an interface that allows (just) checking of various log contents.
+type CheckLogClient interface {
+	BaseURI() string
+	GetSTH(context.Context) (*ct.SignedTreeHead, error)
+	GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
+	GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
+}
+
+// New constructs a new LogClient instance.
+// |uri| is the base URI of the CT log instance to interact with, e.g.
+// https://ct.googleapis.com/pilot
+// |hc| is the underlying client to be used for HTTP requests to the CT log.
+// |opts| can be used to provide a custom logger interface and a public key
+// for signature verification.
+func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
+	logClient, err := jsonclient.New(uri, hc, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &LogClient{*logClient}, err
+}
+
+// RspError represents a server error including HTTP information.
+type RspError = jsonclient.RspError
+
+// addChainWithRetry attempts to add |chain| to the log, using the API endpoint
+// specified by |path|. If the provided context expires before submission is
+// complete, an error will be returned.
+func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + var resp ct.AddChainResponse + var req ct.AddChainRequest + for _, link := range chain { + req.Chain = append(req.Chain, link.Data) + } + + httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp) + if err != nil { + return nil, err + } + + var ds ct.DigitallySigned + if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } else if len(rest) > 0 { + return nil, RspError{ + Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + exts, err := base64.StdEncoding.DecodeString(resp.Extensions) + if err != nil { + return nil, RspError{ + Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + var logID ct.LogID + copy(logID.KeyID[:], resp.ID) + sct := &ct.SignedCertificateTimestamp{ + SCTVersion: resp.SCTVersion, + LogID: logID, + Timestamp: resp.Timestamp, + Extensions: ct.CTExtensions(exts), + Signature: ds, + } + if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sct, nil +} + +// AddChain adds the (DER represented) X509 |chain| to the log. +func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate |chain| to the log. +func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +// GetSTH retrieves the current STH from the log. +// Returns a populated SignedTreeHead, or a non-nil error (which may be of type +// RspError if a raw http.Response is available). +func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) { + var resp ct.GetSTHResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) + if err != nil { + return nil, err + } + + sth, err := resp.ToSignedTreeHead() + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := c.VerifySTHSignature(*sth); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sth, nil +} + +// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is +// successful. +func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + return c.Verifier.VerifySTHSignature(sth) +} + +// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain. 
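+//
+// A minimal sketch, assuming the client was constructed with a
+// jsonclient.Options PublicKeyDER value so that c.Verifier is set
+// (otherwise verification is skipped), and that sct and chain came from an
+// earlier AddChain call:
+//
+//	if err := c.VerifySCTSignature(*sct, ct.X509LogEntryType, chain); err != nil {
+//		// SCT signature did not verify against the chain
+//	}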
+func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp) + if err != nil { + return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err) + } + entry := ct.LogEntry{Leaf: *leaf} + return c.Verifier.VerifySCTSignature(sct, entry) +} + +// GetSTHConsistency retrieves the consistency proof between two snapshots. +func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) { + base10 := 10 + params := map[string]string{ + "first": strconv.FormatUint(first, base10), + "second": strconv.FormatUint(second, base10), + } + var resp ct.GetSTHConsistencyResponse + if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil { + return nil, err + } + return resp.Consistency, nil +} + +// GetProofByHash returns an audit path for the hash of an SCT. +func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) { + b64Hash := base64.StdEncoding.EncodeToString(hash) + base10 := 10 + params := map[string]string{ + "tree_size": strconv.FormatUint(treeSize, base10), + "hash": b64Hash, + } + var resp ct.GetProofByHashResponse + if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for a log. +func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + var resp ct.GetRootsResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) + if err != nil { + return nil, err + } + var roots []ct.ASN1Cert + for _, cert64 := range resp.Certificates { + cert, err := base64.StdEncoding.DecodeString(cert64) + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + roots = append(roots, ct.ASN1Cert{Data: cert}) + } + return roots, nil +} + +// GetEntryAndProof returns a log entry and audit path for the index of a leaf. +func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) { + base10 := 10 + params := map[string]string{ + "leaf_index": strconv.FormatUint(index, base10), + "tree_size": strconv.FormatUint(treeSize, base10), + } + var resp ct.GetEntryAndProofResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go new file mode 100644 index 00000000..afd75a6d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go @@ -0,0 +1,223 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "net/http" + "os" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client/configpb" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/x509" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" +) + +type interval struct { + lower *time.Time // nil => no lower bound + upper *time.Time // nil => no upper bound +} + +// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given +// filename, which should contain text-protobuf encoded configuration data. +func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) { + if len(filename) == 0 { + return nil, errors.New("log config filename empty") + } + + cfgBytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read log config: %v", err) + } + + var cfg configpb.TemporalLogConfig + if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil { + if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil { + return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr) + } + } + + if len(cfg.Shard) == 0 { + return nil, errors.New("empty log config found") + } + return &cfg, nil +} + +// AddLogClient is an interface that allows adding certificates and pre-certificates to a log. +// Both LogClient and TemporalLogClient implement this interface, which allows users to +// commonize code for adding certs to normal/temporal logs. +type AddLogClient interface { + AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) +} + +// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log. +type TemporalLogClient struct { + Clients []*LogClient + intervals []interval +} + +// NewTemporalLogClient builds a new client for interacting with a temporal log. +// The provided config should be contiguous and chronological. 
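+//
+// A minimal usage sketch, assuming ctx and chain are already in scope (the
+// config file name is illustrative):
+//
+//	cfg, err := TemporalLogConfigFromFile("temporal_log_config.textproto")
+//	if err != nil {
+//		// handle error
+//	}
+//	tlc, err := NewTemporalLogClient(cfg, &http.Client{})
+//	if err != nil {
+//		// handle error
+//	}
+//	sct, err := tlc.AddChain(ctx, chain)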
+func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) { + if len(cfg.GetShard()) == 0 { + return nil, errors.New("empty config") + } + + overall, err := shardInterval(cfg.Shard[0]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err) + } + intervals := make([]interval, 0, len(cfg.Shard)) + intervals = append(intervals, overall) + for i := 1; i < len(cfg.Shard); i++ { + interval, err := shardInterval(cfg.Shard[i]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err) + } + if overall.upper == nil { + return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i) + } + if interval.lower == nil { + return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i) + } + if !interval.lower.Equal(*overall.upper) { + return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper) + } + overall.upper = interval.upper + intervals = append(intervals, interval) + } + clients := make([]*LogClient, 0, len(cfg.Shard)) + for i, shard := range cfg.Shard { + opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"} + opts.PublicKeyDER = shard.GetPublicKeyDer() + c, err := New(shard.Uri, hc, opts) + if err != nil { + return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err) + } + clients = append(clients, c) + } + tlc := TemporalLogClient{ + Clients: clients, + intervals: intervals, + } + return &tlc, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for all +// of the shards of a temporal log (i.e. the union). +func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + type result struct { + roots []ct.ASN1Cert + err error + } + results := make(chan result, len(tlc.Clients)) + for _, c := range tlc.Clients { + go func(c *LogClient) { + var r result + r.roots, r.err = c.GetAcceptedRoots(ctx) + results <- r + }(c) + } + + var allRoots []ct.ASN1Cert + seen := make(map[[sha256.Size]byte]bool) + for range tlc.Clients { + r := <-results + if r.err != nil { + return nil, r.err + } + for _, root := range r.roots { + h := sha256.Sum256(root.Data) + if seen[h] { + continue + } + seen[h] = true + allRoots = append(allRoots, root) + } + } + return allRoots, nil +} + +// AddChain adds the (DER represented) X509 chain to the appropriate log. +func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log. 
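+//
+// For example (illustrative), with the precertificate at chain[0] and its
+// issuer at chain[1]; the shard is chosen from the precertificate's
+// NotAfter date via IndexByDate:
+//
+//	sct, err := tlc.AddPreChain(ctx, []ct.ASN1Cert{precert, issuer})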
+func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + // Parse the first entry in the chain + if len(chain) == 0 { + return nil, errors.New("missing chain") + } + cert, err := x509.ParseCertificate(chain[0].Data) + if err != nil { + return nil, fmt.Errorf("failed to parse initial chain entry: %v", err) + } + cidx, err := tlc.IndexByDate(cert.NotAfter) + if err != nil { + return nil, fmt.Errorf("failed to find log to process cert: %v", err) + } + return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain) +} + +// IndexByDate returns the index of the Clients entry that is appropriate for the given +// date. +func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) { + for i, interval := range tlc.intervals { + if (interval.lower != nil) && when.Before(*interval.lower) { + continue + } + if (interval.upper != nil) && !when.Before(*interval.upper) { + continue + } + return i, nil + } + return -1, fmt.Errorf("no log found encompassing date %v", when) +} + +func shardInterval(cfg *configpb.LogShardConfig) (interval, error) { + var interval interval + if cfg.NotAfterStart != nil { + if err := cfg.NotAfterStart.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err) + } + t := cfg.NotAfterStart.AsTime() + interval.lower = &t + } + if cfg.NotAfterLimit != nil { + if err := cfg.NotAfterLimit.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err) + } + t := cfg.NotAfterLimit.AsTime() + interval.upper = &t + } + + if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) { + return interval, errors.New("inverted interval") + } + return interval, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml index 6c856c11..50ac5ef7 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -85,6 +85,13 @@ steps: # Wait for trillian logserver to be up until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose waitFor: ['prepare'] # Run the presubmit tests diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml index 0e47e326..566edfe0 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -85,6 +85,13 @@ steps: # Wait for trillian logserver to be up until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export 
MYSQL_USER_HOST="%"
+      yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose
   waitFor: ['prepare']
 
 # Run the presubmit tests
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml
new file mode 100644
index 00000000..37faca72
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml
@@ -0,0 +1,161 @@
+#############################################################################
+## This file is based on cloudbuild.yaml, but targets PostgreSQL instead of
+## MySQL.
+#############################################################################
+
+timeout: 1200s
+options:
+  machineType: N1_HIGHCPU_32
+  volumes:
+  - name: go-modules
+    path: /go
+  env:
+  - GOPROXY=https://proxy.golang.org
+  - PROJECT_ROOT=github.com/google/certificate-transparency-go
+  - GOPATH=/go
+
+substitutions:
+  _CLUSTER_NAME: trillian-opensource-ci
+  _MASTER_ZONE: us-central1-a
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+  entrypoint: 'bash'
+  args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+  args: [
+    'build',
+    '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+    '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+    '-f', './integration/Dockerfile',
+    '.'
+  ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+  entrypoint: 'bash'
+  id: 'prepare'
+  args:
+  - '-exc'
+  - |
+    # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+    docker pull gcr.io/$PROJECT_ID/log_server:latest
+    docker tag gcr.io/$PROJECT_ID/log_server:latest postgresql_trillian-log-server
+    docker pull gcr.io/$PROJECT_ID/log_signer:latest
+    docker tag gcr.io/$PROJECT_ID/log_signer:latest postgresql_trillian-log-signer
+
+    # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+    export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+    # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+    echo -e "networks:\n  default:\n    external:\n      name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml
+
+    docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml pull postgresql trillian-log-server trillian-log-signer
+    docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml up -d postgresql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+  id: 'ci-ready'
+  entrypoint: 'bash'
+  args:
+  - '-ec'
+  - |
+    go install \
+      github.com/golang/protobuf/proto \
+      github.com/golang/protobuf/protoc-gen-go \
+      github.com/golang/mock/mockgen \
+      go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+      github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+    # Generate all protoc and mockgen files
+    go generate -run="protoc" ./...
+    go generate -run="mockgen" ./...
+
+    # Cache all the modules we'll need too
+    go mod download
+    go test ./...
+ + # Wait for trillian logserver to be up + until nc -z postgresql_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export POSTGRESQL_HOST="postgresql" + yes | bash "$${CT_GO_PATH}/scripts/resetpgctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go new file mode 100644 index 00000000..640fcec9 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ctutil contains utilities for Certificate Transparency.
+package ctutil
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+
+	ct "github.com/google/certificate-transparency-go"
+	"github.com/google/certificate-transparency-go/tls"
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+var emptyHash = [sha256.Size]byte{}
+
+// LeafHashB64 does as LeafHash does, but returns the leaf hash base64-encoded.
+// The base64-encoded leaf hash returned by LeafHashB64 can be used with the
+// get-proof-by-hash API endpoint of Certificate Transparency Logs.
+func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (string, error) {
+	hash, err := LeafHash(chain, sct, embedded)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(hash[:]), nil
+}
+
+// LeafHash calculates the leaf hash of the certificate or precertificate at
+// chain[0] that sct was issued for.
+//
+// sct is required because the SCT timestamp is used to calculate the leaf hash.
+// Leaf hashes are unique to (pre)certificate-SCT pairs.
+//
+// This function can be used with three different types of leaf certificate:
+//   - X.509 Certificate:
+//     If using this function to calculate the leaf hash for a normal X.509
+//     certificate then it is enough to just provide the end entity
+//     certificate in chain. This case assumes that the SCT being provided is
+//     not embedded within the leaf certificate provided, i.e. the certificate
+//     is what was submitted to the Certificate Transparency Log in order to
+//     obtain the SCT. For this case, set embedded to false.
+//   - Precertificate:
+//     If using this function to calculate the leaf hash for a precertificate
+//     then the issuing certificate must also be provided in chain. The
+//     precertificate should be at chain[0], and its issuer at chain[1]. For
+//     this case, set embedded to false.
+//   - X.509 Certificate containing the SCT embedded within it:
+//     If using this function to calculate the leaf hash for a certificate
+//     where the SCT provided is embedded within the certificate you
+//     are providing at chain[0], set embedded to true. LeafHash will
+//     calculate the leaf hash by building the corresponding precertificate.
+//     LeafHash will return an error if the provided SCT cannot be found
+//     embedded within chain[0]. As with the precertificate case, the issuing
+//     certificate must also be provided in chain. The certificate containing
+//     the embedded SCT should be at chain[0], and its issuer at chain[1].
+//
+// Note: LeafHash doesn't check that the provided SCT verifies for the given
+// chain. It simply calculates what the leaf hash would be for the given
+// (pre)certificate-SCT pair.
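+//
+// A brief sketch of the embedded-SCT case, assuming leaf carries sct within
+// it and issuer is its issuing certificate (names are illustrative):
+//
+//	hash, err := LeafHash([]*x509.Certificate{leaf, issuer}, sct, true)
+//	if err != nil {
+//		// handle error
+//	}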
+func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) ([sha256.Size]byte, error) { + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return emptyHash, err + } + return ct.LeafHashForLeaf(leaf) +} + +// VerifySCT takes the public key of a Certificate Transparency Log, a +// certificate chain, and an SCT and verifies whether the SCT is a valid SCT for +// the certificate at chain[0], signed by the Log that the public key belongs +// to. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + s, err := ct.NewSignatureVerifier(pubKey) + if err != nil { + return fmt.Errorf("error creating signature verifier: %s", err) + } + + return VerifySCTWithVerifier(s, chain, sct, embedded) +} + +// VerifySCTWithVerifier takes a ct.SignatureVerifier, a certificate chain, and +// an SCT and verifies whether the SCT is a valid SCT for the certificate at +// chain[0], signed by the Log whose public key was used to set up the +// ct.SignatureVerifier. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. 
VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + if sv == nil { + return errors.New("ct.SignatureVerifier is nil") + } + + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return err + } + + return sv.VerifySCTSignature(*sct, ct.LogEntry{Leaf: *leaf}) +} + +func createLeaf(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (*ct.MerkleTreeLeaf, error) { + if len(chain) == 0 { + return nil, errors.New("chain is empty") + } + if sct == nil { + return nil, errors.New("sct is nil") + } + + if embedded { + sctPresent, err := ContainsSCT(chain[0], sct) + if err != nil { + return nil, fmt.Errorf("error checking for SCT in leaf certificate: %s", err) + } + if !sctPresent { + return nil, errors.New("SCT provided is not embedded within leaf certificate") + } + } + + certType := ct.X509LogEntryType + if chain[0].IsPrecertificate() || embedded { + certType = ct.PrecertLogEntryType + } + + var leaf *ct.MerkleTreeLeaf + var err error + if embedded { + leaf, err = ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp) + } else { + leaf, err = ct.MerkleTreeLeafFromChain(chain, certType, sct.Timestamp) + } + if err != nil { + return nil, fmt.Errorf("error creating MerkleTreeLeaf: %s", err) + } + return leaf, nil +} + +// ContainsSCT checks to see whether the given SCT is embedded within the given +// certificate. +func ContainsSCT(cert *x509.Certificate, sct *ct.SignedCertificateTimestamp) (bool, error) { + if cert == nil || sct == nil { + return false, nil + } + + sctBytes, err := tls.Marshal(*sct) + if err != nil { + return false, fmt.Errorf("error tls.Marshalling SCT: %s", err) + } + for _, s := range cert.SCTList.SCTList { + if bytes.Equal(sctBytes, s.Val) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go new file mode 100644 index 00000000..83d9739c --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go @@ -0,0 +1,174 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ctutil + +import ( + "context" + "crypto/sha256" + "fmt" + "net/http" + "strings" + "sync" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/loglist3" + "github.com/google/certificate-transparency-go/x509" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// LogInfo holds the objects needed to perform per-log verification and +// validation of SCTs. +type LogInfo struct { + Description string + Client client.CheckLogClient + MMD time.Duration + Verifier *ct.SignatureVerifier + PublicKey []byte + + mu sync.RWMutex + lastSTH *ct.SignedTreeHead +} + +// NewLogInfo builds a LogInfo object based on a log list entry. +func NewLogInfo(log *loglist3.Log, hc *http.Client) (*LogInfo, error) { + url := log.URL + if !strings.HasPrefix(url, "https://") { + url = "https://" + url + } + lc, err := client.New(url, hc, jsonclient.Options{PublicKeyDER: log.Key, UserAgent: "ct-go-logclient"}) + if err != nil { + return nil, fmt.Errorf("failed to create client for log %q: %v", log.Description, err) + } + return newLogInfo(log, lc) +} + +func newLogInfo(log *loglist3.Log, lc client.CheckLogClient) (*LogInfo, error) { + logKey, err := x509.ParsePKIXPublicKey(log.Key) + if err != nil { + return nil, fmt.Errorf("failed to parse public key data for log %q: %v", log.Description, err) + } + verifier, err := ct.NewSignatureVerifier(logKey) + if err != nil { + return nil, fmt.Errorf("failed to build verifier for log %q: %v", log.Description, err) + } + mmd := time.Duration(log.MMD) * time.Second + return &LogInfo{ + Description: log.Description, + Client: lc, + MMD: mmd, + Verifier: verifier, + PublicKey: log.Key, + }, nil +} + +// LogInfoByHash holds LogInfo objects indexed by the SHA-256 hash of the log's public key. +type LogInfoByHash map[[sha256.Size]byte]*LogInfo + +// LogInfoByKeyHash builds a map of LogInfo objects indexed by their key hashes. +func LogInfoByKeyHash(ll *loglist3.LogList, hc *http.Client) (LogInfoByHash, error) { + return logInfoByKeyHash(ll, hc, NewLogInfo) +} + +func logInfoByKeyHash(ll *loglist3.LogList, hc *http.Client, infoFactory func(*loglist3.Log, *http.Client) (*LogInfo, error)) (map[[sha256.Size]byte]*LogInfo, error) { + result := make(map[[sha256.Size]byte]*LogInfo) + for _, operator := range ll.Operators { + for _, log := range operator.Logs { + h := sha256.Sum256(log.Key) + li, err := infoFactory(log, hc) + if err != nil { + return nil, err + } + result[h] = li + } + } + return result, nil +} + +// LastSTH returns the last STH known for the log. +func (li *LogInfo) LastSTH() *ct.SignedTreeHead { + li.mu.RLock() + defer li.mu.RUnlock() + return li.lastSTH +} + +// SetSTH sets the last STH known for the log. +func (li *LogInfo) SetSTH(sth *ct.SignedTreeHead) { + li.mu.Lock() + defer li.mu.Unlock() + li.lastSTH = sth +} + +// VerifySCTSignature checks that the signature in the SCT matches the given leaf (adjusted for the +// timestamp in the SCT) and log.
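Before the verification methods that follow, a sketch (not part of the patch) of how LogInfo objects might be looked up for an incoming SCT — the log list is assumed to be parsed elsewhere, and the lookup key is the SCT's LogID:

```go
package example

import (
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/ctutil"
	"github.com/google/certificate-transparency-go/loglist3"
)

// logInfoForSCT indexes every log in ll by key hash and returns the LogInfo
// matching the SCT's LogID, or nil if the log is unknown.
func logInfoForSCT(ll *loglist3.LogList, sct *ct.SignedCertificateTimestamp) (*ctutil.LogInfo, error) {
	infos, err := ctutil.LogInfoByKeyHash(ll, http.DefaultClient)
	if err != nil {
		return nil, err
	}
	return infos[sct.LogID.KeyID], nil
}
```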
+func (li *LogInfo) VerifySCTSignature(sct ct.SignedCertificateTimestamp, leaf ct.MerkleTreeLeaf) error { + leaf.TimestampedEntry.Timestamp = sct.Timestamp + if err := li.Verifier.VerifySCTSignature(sct, ct.LogEntry{Leaf: leaf}); err != nil { + return fmt.Errorf("failed to verify SCT signature from log %q: %v", li.Description, err) + } + return nil +} + +// VerifyInclusionLatest checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the latest known tree size of the log. If no tree size for the log is known, it will +// be queried. On success, returns the index of the leaf in the log. +func (li *LogInfo) VerifyInclusionLatest(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth := li.LastSTH() + if sth == nil { + var err error + sth, err = li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + } + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusion checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the current tree size of the log. On success, returns the index of the leaf +// in the log. +func (li *LogInfo) VerifyInclusion(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth, err := li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusionAt checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the given tree size & root hash of the log. On success, returns the index of the +// leaf in the log. +func (li *LogInfo) VerifyInclusionAt(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp, treeSize uint64, rootHash []byte) (int64, error) { + leaf.TimestampedEntry.Timestamp = timestamp + leafHash, err := ct.LeafHashForLeaf(&leaf) + if err != nil { + return -1, fmt.Errorf("failed to create leaf hash: %v", err) + } + + rsp, err := li.Client.GetProofByHash(ctx, leafHash[:], treeSize) + if err != nil { + return -1, fmt.Errorf("failed to GetProofByHash(sct,size=%d): %v", treeSize, err) + } + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(rsp.LeafIndex), treeSize, leafHash[:], rsp.AuditPath, rootHash); err != nil { + return -1, fmt.Errorf("failed to verify inclusion proof at size %d: %v", treeSize, err) + } + return rsp.LeafIndex, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go new file mode 100644 index 00000000..30932f30 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go @@ -0,0 +1,72 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonclient + +import ( + "sync" + "time" +) + +type backoff struct { + mu sync.RWMutex + multiplier uint + notBefore time.Time +} + +const ( + // maximum backoff is 2^(maxMultiplier-1) = 128 seconds + maxMultiplier = 8 +) + +func (b *backoff) set(override *time.Duration) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + if b.notBefore.After(time.Now()) { + if override != nil { + // If an existing backoff is set but the override would be longer + // than it, extend the backoff to the override. + notBefore := time.Now().Add(*override) + if notBefore.After(b.notBefore) { + b.notBefore = notBefore + } + } + return time.Until(b.notBefore) + } + var wait time.Duration + if override != nil { + wait = *override + } else { + if b.multiplier < maxMultiplier { + b.multiplier++ + } + wait = time.Second * time.Duration(1<<(b.multiplier-1)) + } + b.notBefore = time.Now().Add(wait) + return wait +} + +func (b *backoff) decreaseMultiplier() { + b.mu.Lock() + defer b.mu.Unlock() + if b.multiplier > 0 { + b.multiplier-- + } +} + +func (b *backoff) until() time.Time { + b.mu.RLock() + defer b.mu.RUnlock() + return b.notBefore +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go new file mode 100644 index 00000000..1dee4cb6 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go @@ -0,0 +1,335 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonclient provides a simple client for fetching and parsing +// JSON CT structures from a log. +package jsonclient + +import ( + "bytes" + "context" + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" + "golang.org/x/net/context/ctxhttp" + "k8s.io/klog/v2" +) + +const maxJitter = 250 * time.Millisecond + +type backoffer interface { + // set adjusts/increases the current backoff interval (typically on retryable failure); + // if the optional parameter is provided, this will be used as the interval if it is greater + // than the currently set interval. Returns the current wait period so that it can be + // logged along with any error message. + set(*time.Duration) time.Duration + // decreaseMultiplier reduces the current backoff multiplier, typically on success. + decreaseMultiplier() + // until returns the time until which the client should wait before making a request; + // it may be in the past, in which case it should be ignored. + until() time.Time +} + +// JSONClient provides common functionality for interacting with a JSON server +// that uses cryptographic signatures. +type JSONClient struct { + uri string // the base URI of the server. e.g.
https://ct.googleapis.com/pilot + httpClient *http.Client // used to interact with the server via HTTP + Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available) + logger Logger // interface to use for logging warnings and errors + backoff backoffer // object used to store and calculate backoff information + userAgent string // If set, this is sent as the UserAgent header. +} + +// Logger is a simple logging interface used to log internal errors and warnings. +type Logger interface { + // Printf formats and logs a message. + Printf(string, ...interface{}) +} + +// Options are the options for creating a new JSONClient. +type Options struct { + // Interface to use for logging warnings and errors; if nil, the + // standard library log package will be used. + Logger Logger + // PEM format public key to use for signature verification. + PublicKey string + // DER format public key to use for signature verification. + PublicKeyDER []byte + // UserAgent, if set, will be sent as the User-Agent header with each request. + UserAgent string +} + +// ParsePublicKey parses and returns the public key contained in opts. +// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used. +// If neither is set, nil will be returned. +func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) { + if len(opts.PublicKeyDER) > 0 { + return x509.ParsePKIXPublicKey(opts.PublicKeyDER) + } + + if opts.PublicKey != "" { + pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey)) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, errors.New("extra data found after PEM key decoded") + } + return pubkey, nil + } + + return nil, nil +} + +type basicLogger struct{} + +func (bl *basicLogger) Printf(msg string, args ...interface{}) { + log.Printf(msg, args...) +} + +// RspError represents an error that occurred when processing a response from a server, +// and also includes key details from the http.Response that triggered the error. +type RspError struct { + Err error + StatusCode int + Body []byte } + +// Error formats the RspError instance, focusing on the error. +func (e RspError) Error() string { + return e.Err.Error() +} + +// New constructs a new JSONClient instance, for the given base URI, using the +// given http.Client object (if provided) and the Options object. +// If opts does not specify a public key, signatures will not be verified. +func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) { + pubkey, err := opts.ParsePublicKey() + if err != nil { + return nil, fmt.Errorf("invalid public key: %v", err) + } + + var verifier *ct.SignatureVerifier + if pubkey != nil { + var err error + verifier, err = ct.NewSignatureVerifier(pubkey) + if err != nil { + return nil, err + } + } + + if hc == nil { + hc = new(http.Client) + } + logger := opts.Logger + if logger == nil { + logger = &basicLogger{} + } + return &JSONClient{ + uri: strings.TrimRight(uri, "/"), + httpClient: hc, + Verifier: verifier, + logger: logger, + backoff: &backoff{}, + userAgent: opts.UserAgent, + }, nil +} + +// BaseURI returns the base URI that the JSONClient makes queries to. +func (c *JSONClient) BaseURI() string { + return c.uri +} + +// GetAndParse makes an HTTP GET call to the given path, and attempts to parse +// the response as a JSON representation of the rsp structure. Returns the +// http.Response, the body of the response, and an error (which may be of +// type RspError if the HTTP response was available).
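A sketch (not part of the patch) that exercises New and the GetAndParse method defined next — the log URI is a placeholder, and logKeyPEM is assumed to hold the log's PEM-encoded public key:

```go
package example

import (
	"context"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/jsonclient"
)

// fetchSTH creates a verifying client for a log and fetches its signed tree head.
func fetchSTH(ctx context.Context, logKeyPEM string) (*ct.GetSTHResponse, error) {
	c, err := jsonclient.New("https://ct.example.com", nil, jsonclient.Options{
		PublicKey: logKeyPEM,          // enables signature verification
		UserAgent: "example-client/1", // sent with every request
	})
	if err != nil {
		return nil, err
	}
	var sth ct.GetSTHResponse
	if _, _, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &sth); err != nil {
		return nil, err
	}
	return &sth, nil
}
```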
+func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a GET request with URL-encoded parameters. + vals := url.Values{} + for k, v := range params { + vals.Add(k, v) + } + fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode()) + klog.V(2).Infof("GET %s", fullURI) + httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + + httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) + if err != nil { + return nil, nil, err + } + + // Read everything now so http.Client can reuse the connection. + body, err := io.ReadAll(httpRsp.Body) + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, err + } + if err != nil { + return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %v", err), StatusCode: httpRsp.StatusCode, Body: body} + } + + if httpRsp.StatusCode != http.StatusOK { + return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil { + return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + return httpRsp, body, nil +} + +// PostAndParse makes an HTTP POST call to the given path, including the request +// parameters, and attempts to parse the response as a JSON representation of +// the rsp structure. Returns the http.Response, the body of the response, and +// an error (which may be of type RspError if the HTTP response was available). +func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a POST request with JSON body. + postBody, err := json.Marshal(req) + if err != nil { + return nil, nil, err + } + fullURI := fmt.Sprintf("%s%s", c.uri, path) + klog.V(2).Infof("POST %s", fullURI) + httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody)) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + httpReq.Header.Set("Content-Type", "application/json") + + httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) + + // Read all of the body, if there is one, so that the http.Client can do Keep-Alive.
+ var body []byte + if httpRsp != nil { + body, err = io.ReadAll(httpRsp.Body) + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, err + } + } + if err != nil { + if httpRsp != nil { + return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err} + } + return nil, nil, err + } + if httpRsp.Request.Method != http.MethodPost { + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections#permanent_redirections + return nil, nil, fmt.Errorf("POST request to %q was converted to %s request to %q", fullURI, httpRsp.Request.Method, httpRsp.Request.URL) + } + + if httpRsp.StatusCode == http.StatusOK { + if err = json.Unmarshal(body, &rsp); err != nil { + return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err} + } + } + return httpRsp, body, nil +} + +// waitForBackoff blocks until the defined backoff interval or context has expired; if the stored +// not-before time is in the past it returns immediately. +func (c *JSONClient) waitForBackoff(ctx context.Context) error { + dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000))))) + if dur < 0 { + dur = 0 + } + backoffTimer := time.NewTimer(dur) + select { + case <-ctx.Done(): + return ctx.Err() + case <-backoffTimer.C: + } + return nil +} + +// PostAndParseWithRetry makes an HTTP POST call, but retries (with backoff) on +// retryable errors; the caller should set a deadline on the provided context +// to prevent infinite retries. Return values are as for PostAndParse. +func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + for { + httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp) + if err != nil { + // Don't retry context errors.
+ if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + wait := c.backoff.set(nil) + c.logger.Printf("Request to %s failed, backing-off %s: %s", c.uri, wait, err) + } else { + switch { + case httpRsp.StatusCode == http.StatusOK: + return httpRsp, body, nil + case httpRsp.StatusCode == http.StatusRequestTimeout: + // Request timeout, retry immediately + c.logger.Printf("Request to %s timed out, retrying immediately", c.uri) + case httpRsp.StatusCode == http.StatusServiceUnavailable: + fallthrough + case httpRsp.StatusCode == http.StatusTooManyRequests: + var backoff *time.Duration + // Retry-After may be either a number of seconds as an int or an RFC 1123 + // date string (RFC 7231 Section 7.1.3) + if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" { + if seconds, err := strconv.Atoi(retryAfter); err == nil { + b := time.Duration(seconds) * time.Second + backoff = &b + } else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil { + b := time.Until(date) + backoff = &b + } + } + wait := c.backoff.set(backoff) + c.logger.Printf("Request to %s failed, backing-off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status) + default: + return nil, nil, RspError{ + StatusCode: httpRsp.StatusCode, + Body: body, + Err: fmt.Errorf("got HTTP status %q", httpRsp.Status)} + } + } + if err := c.waitForBackoff(ctx); err != nil { + return nil, nil, err + } + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go new file mode 100644 index 00000000..34949be0 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go @@ -0,0 +1,129 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loglist3 + +import ( + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" + "k8s.io/klog/v2" +) + +// LogRoots maps Log-URLs (stated in the LogList) to the pools of their accepted +// root-certificates. +type LogRoots map[string]*x509util.PEMCertPool + +// Compatible creates a new LogList containing only Logs matching the temporal, +// root-acceptance and Log-status conditions. +func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList { + active := ll.TemporallyCompatible(cert) + // Do not check root compatibility if roots are not being provided. + if certRoot == nil { + return active + } + return active.RootCompatible(certRoot, roots) +} + +// SelectByStatus creates a new LogList containing only the logs of the +// original whose status matches one of those provided.
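A usage sketch (not part of the patch) for the filter defined next, keeping only the states a verifier would normally accept SCTs from:

```go
package example

import "github.com/google/certificate-transparency-go/loglist3"

// activeLogs keeps only the logs in the usable, qualified, or read-only states.
func activeLogs(ll *loglist3.LogList) loglist3.LogList {
	return ll.SelectByStatus([]loglist3.LogStatus{
		loglist3.UsableLogStatus,
		loglist3.QualifiedLogStatus,
		loglist3.ReadOnlyLogStatus,
	})
}
```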
+func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList { + var active LogList + for _, op := range ll.Operators { + activeOp := *op + activeOp.Logs = []*Log{} + for _, l := range op.Logs { + for _, lstat := range lstats { + if l.State.LogStatus() == lstat { + activeOp.Logs = append(activeOp.Logs, l) + break + } + } + } + if len(activeOp.Logs) > 0 { + active.Operators = append(active.Operators, &activeOp) + } + } + return active +} + +// RootCompatible creates a new LogList containing only the logs of the original +// LogList that are compatible with the provided cert, according to +// the passed in collection of per-log roots. Logs that are missing from +// the collection are treated as always compatible and included, even if +// an empty cert root is passed in. +// The cert root, when provided, is expected to be a CA certificate. +func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) LogList { + var compatible LogList + + // Check whether root is a CA-cert. + if certRoot != nil && !certRoot.IsCA { + klog.Warningf("Compatible method expects fully rooted chain, while last cert of the chain provided is not root") + return compatible + } + + for _, op := range ll.Operators { + compatibleOp := *op + compatibleOp.Logs = []*Log{} + for _, l := range op.Logs { + // If root set is not defined, we treat Log as compatible assuming no + // knowledge of its roots. + if _, ok := roots[l.URL]; !ok { + compatibleOp.Logs = append(compatibleOp.Logs, l) + continue + } + + if certRoot == nil { + continue + } + + // Check root is accepted. + if roots[l.URL].Included(certRoot) { + compatibleOp.Logs = append(compatibleOp.Logs, l) + } + } + if len(compatibleOp.Logs) > 0 { + compatible.Operators = append(compatible.Operators, &compatibleOp) + } + } + return compatible +} + +// TemporallyCompatible creates a new LogList containing only the logs of the +// original LogList that are compatible with the provided cert, according to +// NotAfter and TemporalInterval matching. +// Returns empty LogList if nil-cert is provided. +func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList { + var compatible LogList + if cert == nil { + return compatible + } + + for _, op := range ll.Operators { + compatibleOp := *op + compatibleOp.Logs = []*Log{} + for _, l := range op.Logs { + if l.TemporalInterval == nil { + compatibleOp.Logs = append(compatibleOp.Logs, l) + continue + } + if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) { + compatibleOp.Logs = append(compatibleOp.Logs, l) + } + } + if len(compatibleOp.Logs) > 0 { + compatible.Operators = append(compatible.Operators, &compatibleOp) + } + } + return compatible +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go new file mode 100644 index 00000000..c5e94f18 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go @@ -0,0 +1,427 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loglist3 allows parsing and searching of the master CT Log list. +// It expects the log list to conform to the v3 schema. +package loglist3 + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + "unicode" + + "github.com/google/certificate-transparency-go/tls" +) + +const ( + // LogListURL has the master URL for Google Chrome's log list. + LogListURL = "https://www.gstatic.com/ct/log_list/v3/log_list.json" + // LogListSignatureURL has the URL for the signature over Google Chrome's log list. + LogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/log_list.sig" + // AllLogListURL has the URL for the list of all known logs. + AllLogListURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.json" + // AllLogListSignatureURL has the URL for the signature over the list of all known logs. + AllLogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.sig" +) + +// Manually mapped from https://www.gstatic.com/ct/log_list/v3/log_list_schema.json + +// LogList holds a collection of CT logs, grouped by operator. +type LogList struct { + // IsAllLogs is set to true if the list contains all known logs, not + // only usable ones. + IsAllLogs bool `json:"is_all_logs,omitempty"` + // Version is the version of the log list. + Version string `json:"version,omitempty"` + // LogListTimestamp is the time at which the log list was published. + LogListTimestamp time.Time `json:"log_list_timestamp,omitempty"` + // Operators is a list of CT log operators and the logs they operate. + Operators []*Operator `json:"operators"` +} + +// Operator holds a collection of CT logs run by the same organisation. +// It also provides information about that organisation, e.g. contact details. +type Operator struct { + // Name is the name of the CT log operator. + Name string `json:"name"` + // Email lists the email addresses that can be used to contact this log + // operator. + Email []string `json:"email"` + // Logs is a list of RFC 6962 CT logs run by this operator. + Logs []*Log `json:"logs"` + // TiledLogs is a list of Static CT API logs run by this operator. + TiledLogs []*TiledLog `json:"tiled_logs"` +} + +// Log describes a single RFC 6962 CT log. It is nearly the same as the TiledLog struct, +// but has a single URL field instead of SubmissionURL and MonitoringURL fields. +type Log struct { + // Description is a human-readable string that describes the log. + Description string `json:"description,omitempty"` + // LogID is the SHA-256 hash of the log's public key. + LogID []byte `json:"log_id"` + // Key is the public key with which signatures can be verified. + Key []byte `json:"key"` + // URL is the address of the HTTPS API. + URL string `json:"url"` + // DNS is the address of the DNS API. + DNS string `json:"dns,omitempty"` + // MMD is the Maximum Merge Delay, in seconds. All submitted + // certificates must be incorporated into the log within this time.
+ MMD int32 `json:"mmd"` + // PreviousOperators is a list of previous operators and the timestamp + // of when they stopped running the log. + PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"` + // State is the current state of the log, from the perspective of the + // log list distributor. + State *LogStates `json:"state,omitempty"` + // TemporalInterval, if set, indicates that this log only accepts + // certificates with a NotAfter date in this time range. + TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"` + // Type indicates the purpose of this log, e.g. "test" or "prod". + Type string `json:"log_type,omitempty"` +} + +// TiledLog describes a Static CT API log. It is nearly the same as the Log struct, +// but has both SubmissionURL and MonitoringURL fields instead of a single URL field. +type TiledLog struct { + // Description is a human-readable string that describes the log. + Description string `json:"description,omitempty"` + // LogID is the SHA-256 hash of the log's public key. + LogID []byte `json:"log_id"` + // Key is the public key with which signatures can be verified. + Key []byte `json:"key"` + // SubmissionURL is the address of the log's submission (write) API. + SubmissionURL string `json:"submission_url"` + // MonitoringURL is the address of the log's monitoring (read) API. + MonitoringURL string `json:"monitoring_url"` + // DNS is the address of the DNS API. + DNS string `json:"dns,omitempty"` + // MMD is the Maximum Merge Delay, in seconds. All submitted + // certificates must be incorporated into the log within this time. + MMD int32 `json:"mmd"` + // PreviousOperators is a list of previous operators and the timestamp + // of when they stopped running the log. + PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"` + // State is the current state of the log, from the perspective of the + // log list distributor. + State *LogStates `json:"state,omitempty"` + // TemporalInterval, if set, indicates that this log only accepts + // certificates with a NotAfter date in this time range. + TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"` + // Type indicates the purpose of this log, e.g. "test" or "prod". + Type string `json:"log_type,omitempty"` +} + +// PreviousOperator holds information about a log operator and the time at which +// they stopped running a log. +type PreviousOperator struct { + // Name is the name of the CT log operator. + Name string `json:"name"` + // EndTime is the time at which the operator stopped running a log. + EndTime time.Time `json:"end_time"` +} + +// TemporalInterval is a time range. +type TemporalInterval struct { + // StartInclusive is the beginning of the time range. + StartInclusive time.Time `json:"start_inclusive"` + // EndExclusive is just after the end of the time range. + EndExclusive time.Time `json:"end_exclusive"` +} + +// LogStatus indicates Log status. +type LogStatus int + +// LogStatus values +const ( + UndefinedLogStatus LogStatus = iota + PendingLogStatus + QualifiedLogStatus + UsableLogStatus + ReadOnlyLogStatus + RetiredLogStatus + RejectedLogStatus +) + +//go:generate stringer -type=LogStatus + +// LogStates are the states that a CT log can be in, from the perspective of a +// user agent. Only one should be set - this is the current state. +type LogStates struct { + // Pending indicates that the log is in the "pending" state. + Pending *LogState `json:"pending,omitempty"` + // Qualified indicates that the log is in the "qualified" state.
+ Qualified *LogState `json:"qualified,omitempty"` + // Usable indicates that the log is in the "usable" state. + Usable *LogState `json:"usable,omitempty"` + // ReadOnly indicates that the log is in the "readonly" state. + ReadOnly *ReadOnlyLogState `json:"readonly,omitempty"` + // Retired indicates that the log is in the "retired" state. + Retired *LogState `json:"retired,omitempty"` + // Rejected indicates that the log is in the "rejected" state. + Rejected *LogState `json:"rejected,omitempty"` +} + +// LogState contains details on the current state of a CT log. +type LogState struct { + // Timestamp is the time when the state began. + Timestamp time.Time `json:"timestamp"` +} + +// ReadOnlyLogState contains details on the current state of a read-only CT log. +type ReadOnlyLogState struct { + LogState + // FinalTreeHead is the root hash and tree size at which the CT log was + // made read-only. This should never change while the log is read-only. + FinalTreeHead TreeHead `json:"final_tree_head"` +} + +// TreeHead is the root hash and tree size of a CT log. +type TreeHead struct { + // SHA256RootHash is the root hash of the CT log's Merkle tree. + SHA256RootHash []byte `json:"sha256_root_hash"` + // TreeSize is the size of the CT log's Merkle tree. + TreeSize int64 `json:"tree_size"` +} + +// LogStatus returns the LogStatus enum value corresponding to the state that is set. +func (ls *LogStates) LogStatus() LogStatus { + switch { + case ls == nil: + return UndefinedLogStatus + case ls.Pending != nil: + return PendingLogStatus + case ls.Qualified != nil: + return QualifiedLogStatus + case ls.Usable != nil: + return UsableLogStatus + case ls.ReadOnly != nil: + return ReadOnlyLogStatus + case ls.Retired != nil: + return RetiredLogStatus + case ls.Rejected != nil: + return RejectedLogStatus + default: + return UndefinedLogStatus + } +} + +// String returns the printable name of the state. +func (ls *LogStates) String() string { + return ls.LogStatus().String() +} + +// Active picks whichever state is set. If multiple states are set (not expected), it picks one of them. +func (ls *LogStates) Active() (*LogState, *ReadOnlyLogState) { + if ls == nil { + return nil, nil + } + switch { + case ls.Pending != nil: + return ls.Pending, nil + case ls.Qualified != nil: + return ls.Qualified, nil + case ls.Usable != nil: + return ls.Usable, nil + case ls.ReadOnly != nil: + return nil, ls.ReadOnly + case ls.Retired != nil: + return ls.Retired, nil + case ls.Rejected != nil: + return ls.Rejected, nil + default: + return nil, nil + } +} + +// GoogleOperated returns whether Operator is considered to be Google. +func (op *Operator) GoogleOperated() bool { + for _, email := range op.Email { + if strings.Contains(email, "google-ct-logs@googlegroups") { + return true + } + } + return false +} + +// NewFromJSON creates a LogList from JSON encoded data. +func NewFromJSON(llData []byte) (*LogList, error) { + var ll LogList + if err := json.Unmarshal(llData, &ll); err != nil { + return nil, fmt.Errorf("failed to parse log list: %v", err) + } + return &ll, nil +} + +// NewFromSignedJSON creates a LogList from JSON encoded data, checking a +// signature along the way. The signature should be provided as the +// raw signature bytes.
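A sketch (not part of the patch) for the function defined next — llData and sigData are assumed to be the bytes fetched from LogListURL and LogListSignatureURL, and pubKey the log-list key obtained out of band:

```go
package example

import (
	"crypto"

	"github.com/google/certificate-transparency-go/loglist3"
)

// parseVerifiedList checks the detached signature over the downloaded list
// before parsing it, so a tampered list is rejected up front.
func parseVerifiedList(llData, sigData []byte, pubKey crypto.PublicKey) (*loglist3.LogList, error) {
	return loglist3.NewFromSignedJSON(llData, sigData, pubKey)
}
```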
+func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList, error) { + var sigAlgo tls.SignatureAlgorithm + switch pkType := pubKey.(type) { + case *rsa.PublicKey: + sigAlgo = tls.RSA + case *ecdsa.PublicKey: + sigAlgo = tls.ECDSA + default: + return nil, fmt.Errorf("unsupported public key type %v", pkType) + } + tlsSig := tls.DigitallySigned{ + Algorithm: tls.SignatureAndHashAlgorithm{ + Hash: tls.SHA256, + Signature: sigAlgo, + }, + Signature: rawSig, + } + if err := tls.VerifySignature(pubKey, llData, tlsSig); err != nil { + return nil, fmt.Errorf("failed to verify signature: %v", err) + } + return NewFromJSON(llData) +} + +// FindLogByName returns all logs whose names contain the given string. +func (ll *LogList) FindLogByName(name string) []*Log { + name = strings.ToLower(name) + var results []*Log + for _, op := range ll.Operators { + for _, log := range op.Logs { + if strings.Contains(strings.ToLower(log.Description), name) { + results = append(results, log) + } + } + } + return results +} + +// FindLogByURL finds the log with the given URL. +func (ll *LogList) FindLogByURL(url string) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + // Don't count trailing slashes + if strings.TrimRight(log.URL, "/") == strings.TrimRight(url, "/") { + return log + } + } + } + return nil +} + +// FindLogByKeyHash finds the log with the given key hash. +func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + if bytes.Equal(log.LogID, keyhash[:]) { + return log + } + } + } + return nil +} + +// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix. +func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log { + var results []*Log + for _, op := range ll.Operators { + for _, log := range op.Logs { + hh := hex.EncodeToString(log.LogID[:]) + if strings.HasPrefix(hh, prefix) { + results = append(results, log) + } + } + } + return results +} + +// FindLogByKey finds the log with the given DER-encoded key. +func (ll *LogList) FindLogByKey(key []byte) *Log { + for _, op := range ll.Operators { + for _, log := range op.Logs { + if bytes.Equal(log.Key[:], key) { + return log + } + } + } + return nil +} + +var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$") + +// FuzzyFindLog tries to find logs that match the given input, whose format +// is unspecified. This generally returns a single log, but +// if text input that matches multiple log descriptions is provided, then +// multiple logs may be returned. +func (ll *LogList) FuzzyFindLog(input string) []*Log { + input = strings.Trim(input, " \t") + if logs := ll.FindLogByName(input); len(logs) > 0 { + return logs + } + if log := ll.FindLogByURL(input); log != nil { + return []*Log{log} + } + // Try assuming the input is binary data of some form. First base64: + if data, err := base64.StdEncoding.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindLogByKeyHash(hash); log != nil { + return []*Log{log} + } + } + if log := ll.FindLogByKey(data); log != nil { + return []*Log{log} + } + } + // Now hex, but strip all internal whitespace first.
+ input = stripInternalSpace(input) + if data, err := hex.DecodeString(input); err == nil { + if len(data) == sha256.Size { + var hash [sha256.Size]byte + copy(hash[:], data) + if log := ll.FindLogByKeyHash(hash); log != nil { + return []*Log{log} + } + } + if log := ll.FindLogByKey(data); log != nil { + return []*Log{log} + } + } + // Finally, allow hex strings with an odd number of digits. + if hexDigits.MatchString(input) { + if logs := ll.FindLogByKeyHashPrefix(input); len(logs) > 0 { + return logs + } + } + + return nil +} + +func stripInternalSpace(input string) string { + return strings.Map(func(r rune) rune { + if !unicode.IsSpace(r) { + return r + } + return -1 + }, input) +} diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go new file mode 100644 index 00000000..84c7bbdf --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=LogStatus"; DO NOT EDIT. + +package loglist3 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UndefinedLogStatus-0] + _ = x[PendingLogStatus-1] + _ = x[QualifiedLogStatus-2] + _ = x[UsableLogStatus-3] + _ = x[ReadOnlyLogStatus-4] + _ = x[RetiredLogStatus-5] + _ = x[RejectedLogStatus-6] +} + +const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsableLogStatusReadOnlyLogStatusRetiredLogStatusRejectedLogStatus" + +var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117} + +func (i LogStatus) String() string { + if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) { + return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]] +} diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go index b6af606b..2a96f6a0 100644 --- a/vendor/github.com/google/certificate-transparency-go/types.go +++ b/vendor/github.com/google/certificate-transparency-go/types.go @@ -248,6 +248,21 @@ type CertificateChain struct { Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"` } +// PrecertChainEntryHash is an extended PrecertChainEntry type with the +// IssuanceChainHash field added to store the hash of the +// CertificateChain field of PrecertChainEntry. +type PrecertChainEntryHash struct { + PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"` + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + +// CertificateChainHash is an extended CertificateChain type with the +// IssuanceChainHash field added to store the hash of the +// Entries field of CertificateChain. +type CertificateChainHash struct { + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + // JSONDataEntry holds arbitrary data. 
type JSONDataEntry struct { Data []byte `tls:"minlen:0,maxlen:1677215"` diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go index b2e3f186..6d475700 100644 --- a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go +++ b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go @@ -51,7 +51,7 @@ func Error(err error) error { if perr != nil { return err // If the URL can't be parsed, just return the original error. } - uerr.URL = URL(u).String() // Update the URL to the redacted URL. + uerr.URL = URL(u) // Update the URL to the redacted URL. return uerr } @@ -73,7 +73,7 @@ var paramAllowlist = map[string]struct{}{ } // URL redacts potentially sensitive query parameter values from the URL's query string. -func URL(u *url.URL) *url.URL { +func URL(u *url.URL) string { qs := u.Query() for k, v := range qs { for i := range v { @@ -85,5 +85,5 @@ } r := *u r.RawQuery = qs.Encode() - return &r + return r.Redacted() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go index 172d218e..1555efae 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go @@ -15,6 +15,7 @@ package authn import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -27,6 +28,22 @@ type Authenticator interface { Authorization() (*AuthConfig, error) } +// ContextAuthenticator is like Authenticator, but allows for context to be passed in. +type ContextAuthenticator interface { + // AuthorizationContext returns the value to use in an http transport's Authorization header. + AuthorizationContext(context.Context) (*AuthConfig, error) +} + +// Authorization calls AuthorizationContext with ctx if the given [Authenticator] implements [ContextAuthenticator], +// otherwise it calls Authorization on the given [Authenticator]. +func Authorization(ctx context.Context, authn Authenticator) (*AuthConfig, error) { + if actx, ok := authn.(ContextAuthenticator); ok { + return actx.AuthorizationContext(ctx) + } + + return authn.Authorization() +} + // AuthConfig contains authorization information for connecting to a Registry // Inlined what we use from github.com/docker/cli/cli/config/types type AuthConfig struct { diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go index 99e0b81c..6e8814d8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -15,6 +15,7 @@ package authn import ( + "context" "os" "path/filepath" "sync" @@ -45,6 +46,11 @@ type Keychain interface { Resolve(Resource) (Authenticator, error) } +// ContextKeychain is like Keychain, but allows for context to be passed in. +type ContextKeychain interface { + ResolveContext(context.Context, Resource) (Authenticator, error) +} + // defaultKeychain implements Keychain with the semantics of the standard Docker // credential keychain. type defaultKeychain struct { @@ -62,8 +68,23 @@ const ( DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/" ) -// Resolve implements Keychain. +// Resolve calls ResolveContext with ctx if the given [Keychain] implements [ContextKeychain], +// otherwise it calls Resolve with the given [Resource].
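A sketch (not part of the patch) combining the helper defined next with Authorization from authn.go — the repository string is a placeholder:

```go
package example

import (
	"context"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
)

// credsFor resolves credentials through the context-aware helpers; keychains
// and authenticators that implement the Context* interfaces receive ctx,
// others transparently fall back to the plain methods.
func credsFor(ctx context.Context, repo string) (*authn.AuthConfig, error) {
	r, err := name.NewRepository(repo)
	if err != nil {
		return nil, err
	}
	auth, err := authn.Resolve(ctx, authn.DefaultKeychain, r)
	if err != nil {
		return nil, err
	}
	return authn.Authorization(ctx, auth)
}
```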
+func Resolve(ctx context.Context, keychain Keychain, target Resource) (Authenticator, error) { + if rctx, ok := keychain.(ContextKeychain); ok { + return rctx.ResolveContext(ctx, target) + } + + return keychain.Resolve(target) +} + +// Resolve implements Keychain. func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { + return dk.ResolveContext(context.Background(), target) +} + +// ResolveContext implements ContextKeychain. +func (dk *defaultKeychain) ResolveContext(_ context.Context, target Resource) (Authenticator, error) { dk.mu.Lock() defer dk.mu.Unlock() @@ -86,8 +107,8 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { // config.Load, which may fail if the config can't be parsed. // // If neither was found, look for Podman's auth at - // $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a - // Docker config. + // $REGISTRY_AUTH_FILE or $XDG_RUNTIME_DIR/containers/auth.json + // and attempt to load it as a Docker config. // // If neither are found, fallback to Anonymous. var cf *configfile.ConfigFile @@ -96,16 +117,28 @@ if err != nil { return nil, err } - } else { + } else if fileExists(os.Getenv("REGISTRY_AUTH_FILE")) { + f, err := os.Open(os.Getenv("REGISTRY_AUTH_FILE")) + if err != nil { + return nil, err + } + defer f.Close() + cf, err = config.LoadFromReader(f) + if err != nil { + return nil, err + } + } else if fileExists(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")) { f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")) if err != nil { - return Anonymous, nil + return nil, err } defer f.Close() cf, err = config.LoadFromReader(f) if err != nil { return nil, err } + } else { + return Anonymous, nil } // See: @@ -168,6 +201,10 @@ func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} } type wrapper struct{ h Helper } func (w wrapper) Resolve(r Resource) (Authenticator, error) { + return w.ResolveContext(context.Background(), r) +} + +func (w wrapper) ResolveContext(_ context.Context, r Resource) (Authenticator, error) { u, p, err := w.h.Get(r.RegistryStr()) if err != nil { return Anonymous, nil @@ -194,8 +231,12 @@ type refreshingKeychain struct { } func (r *refreshingKeychain) Resolve(target Resource) (Authenticator, error) { + return r.ResolveContext(context.Background(), target) +} + +func (r *refreshingKeychain) ResolveContext(ctx context.Context, target Resource) (Authenticator, error) { last := time.Now() - auth, err := r.keychain.Resolve(target) + auth, err := Resolve(ctx, r.keychain, target) if err != nil || auth == Anonymous { return auth, err } @@ -224,17 +265,21 @@ type refreshing struct { } func (r *refreshing) Authorization() (*AuthConfig, error) { + return r.AuthorizationContext(context.Background()) +} + +func (r *refreshing) AuthorizationContext(ctx context.Context) (*AuthConfig, error) { r.Lock() defer r.Unlock() if r.cached == nil || r.expired() { r.last = r.now() - auth, err := r.keychain.Resolve(r.target) + auth, err := Resolve(ctx, r.keychain, r.target) if err != nil { return nil, err } + r.cached = auth } - return r.cached.Authorization() + return Authorization(ctx, r.cached) } func (r *refreshing) now() time.Time { diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go index 3b1804f5..fe241a0f 100644 ---
a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go @@ -14,6 +14,8 @@ package authn +import "context" + type multiKeychain struct { keychains []Keychain } @@ -28,8 +30,12 @@ func NewMultiKeychain(kcs ...Keychain) Keychain { // Resolve implements Keychain. func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) { + return mk.ResolveContext(context.Background(), target) +} + +func (mk *multiKeychain) ResolveContext(ctx context.Context, target Resource) (Authenticator, error) { for _, kc := range mk.keychains { - auth, err := kc.Resolve(target) + auth, err := Resolve(ctx, kc, target) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index c049c1ef..28f6967b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -17,6 +17,7 @@ package name import ( // nolint: depguard _ "crypto/sha256" // Recommended by go-digest. + "encoding/json" "strings" "github.com/opencontainers/go-digest" @@ -59,6 +60,25 @@ func (d Digest) String() string { return d.original } +// MarshalJSON formats the digest into a string for JSON serialization. +func (d Digest) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON parses a JSON string into a Digest. +func (d *Digest) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewDigest(s) + if err != nil { + return err + } + *d = n + return nil +} + // NewDigest returns a new Digest representing the given name. func NewDigest(name string, opts ...Option) (Digest, error) { // Split on "@" diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go index 912ab330..0a048677 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -44,7 +44,7 @@ func ParseReference(s string, opts ...Option) (Reference, error) { if d, err := NewDigest(s, opts...); err == nil { return d, nil } - return nil, newErrBadName("could not parse reference: " + s) + return nil, newErrBadName("could not parse reference: %s", s) } type stringConst string diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go index d6e35c39..19e4d1db 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go @@ -22,6 +22,8 @@ import ( "io" "os" "path/filepath" + "runtime" + "sync" "github.com/google/go-containerregistry/pkg/logs" v1 "github.com/google/go-containerregistry/pkg/v1" @@ -37,6 +39,9 @@ var layoutFile = `{ "imageLayoutVersion": "1.0.0" }` +// renameMutex guards os.Rename calls in AppendImage on Windows only. +var renameMutex sync.Mutex + // AppendImage writes a v1.Image to the Path and updates // the index.json to reference it. 
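The MarshalJSON/UnmarshalJSON methods added to name.Digest earlier in this hunk make digests JSON round-trippable as plain strings; a small sketch (not part of the patch, digest value is a placeholder):

```go
package example

import (
	"encoding/json"

	"github.com/google/go-containerregistry/pkg/name"
)

// roundTrip shows that a Digest now serializes to its string form and back,
// e.g. "registry.example.com/app@sha256:<64 hex chars>".
func roundTrip(d name.Digest) (name.Digest, error) {
	b, err := json.Marshal(d)
	if err != nil {
		return name.Digest{}, err
	}
	var out name.Digest
	err = json.Unmarshal(b, &out)
	return out, err
}
```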
func (l Path) AppendImage(img v1.Image, options ...Option) error { @@ -259,6 +264,11 @@ func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func } renamePath := l.path("blobs", finalHash.Algorithm, finalHash.Hex) + + if runtime.GOOS == "windows" { + renameMutex.Lock() + defer renameMutex.Unlock() + } return os.Rename(w.Name(), renamePath) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index 1a24b10d..4207740c 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "maps" "path/filepath" "strings" "time" @@ -165,16 +166,16 @@ func Annotations(f partial.WithRawManifest, anns map[string]string) partial.With if img, ok := f.(v1.Image); ok { return &image{ base: img, - annotations: anns, + annotations: maps.Clone(anns), } } if idx, ok := f.(v1.ImageIndex); ok { return &index{ base: idx, - annotations: anns, + annotations: maps.Clone(anns), } } - return arbitraryRawManifest{a: f, anns: anns} + return arbitraryRawManifest{a: f, anns: maps.Clone(anns)} } type arbitraryRawManifest struct { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go index 4e61002b..d77b37c0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go @@ -47,7 +47,7 @@ type fetcher struct { func makeFetcher(ctx context.Context, target resource, o *options) (*fetcher, error) { auth := o.auth if o.keychain != nil { - kauth, err := o.keychain.Resolve(target) + kauth, err := authn.Resolve(ctx, o.keychain, target) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go index 1c07bd47..332d8ca0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go @@ -125,6 +125,20 @@ func (p *Pusher) writer(ctx context.Context, repo name.Repository, o *options) ( return rw, rw.init(ctx) } +func (p *Pusher) Put(ctx context.Context, ref name.Reference, t Taggable) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + + m, err := taggableToManifest(t) + if err != nil { + return err + } + + return w.commitManifest(ctx, ref, m) +} + func (p *Pusher) Push(ctx context.Context, ref name.Reference, t Taggable) error { w, err := p.writer(ctx, ref.Context(), p.o) if err != nil { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go index e30ca57e..4bc6f70a 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go @@ -61,12 +61,12 @@ func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, } defer resp.Body.Close() - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest, http.StatusNotAcceptable); err != nil { return nil, 
err } var b []byte - if resp.StatusCode == http.StatusOK { + if resp.StatusCode == http.StatusOK && resp.Header.Get("Content-Type") == string(types.OCIImageIndex) { b, err = io.ReadAll(resp.Body) if err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go index fdb362b7..f2452469 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -33,7 +33,7 @@ var _ http.RoundTripper = (*basicTransport)(nil) // RoundTrip implements http.RoundTripper func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { if bt.auth != authn.Anonymous { - auth, err := bt.auth.Authorization() + auth, err := authn.Authorization(in.Context(), bt.auth) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index cb156749..ea652d4a 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -24,8 +24,10 @@ import ( "net/http" "net/url" "strings" + "sync" authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/logs" @@ -49,7 +51,7 @@ func Exchange(ctx context.Context, reg name.Registry, auth authn.Authenticator, if err != nil { return nil, err } - authcfg, err := auth.Authorization() + authcfg, err := authn.Authorization(ctx, auth) if err != nil { return nil, err } @@ -98,6 +100,7 @@ func fromChallenge(reg name.Registry, auth authn.Authenticator, t http.RoundTrip } type bearerTransport struct { + mx sync.RWMutex // Wrapped by bearerTransport. inner http.RoundTripper // Basic credentials that we exchange for bearer tokens. @@ -139,7 +142,10 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // the registry with which we are interacting. // In case of redirect http.Client can use an empty Host, check URL too. if matchesHost(bt.registry.RegistryStr(), in, bt.scheme) { - hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) + bt.mx.RLock() + localToken := bt.bearer.RegistryToken + bt.mx.RUnlock() + hdr := fmt.Sprintf("Bearer %s", localToken) in.Header.Set("Authorization", hdr) } return bt.inner.RoundTrip(in) @@ -156,11 +162,12 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { res.Body.Close() newScopes := []string{} + bt.mx.Lock() + got := stringSet(bt.scopes) for _, wac := range challenges { // TODO(jonjohnsonjr): Should we also update "realm" or "service"? if want, ok := wac.Parameters["scope"]; ok { // Add any scopes that we don't already request. - got := stringSet(bt.scopes) if _, ok := got[want]; !ok { newScopes = append(newScopes, want) } @@ -172,6 +179,7 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // otherwise the registry might just ignore it :/ newScopes = append(newScopes, bt.scopes...) bt.scopes = newScopes + bt.mx.Unlock() // TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge. 
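Aside: the sync.RWMutex threaded through bearerTransport above (and through the refresh paths in the next hunk) is the usual guarded-token pattern: RoundTrip copies the token under a read lock while refreshes replace it under the write lock. A self-contained sketch of that pattern; tokenSource is illustrative only, not go-containerregistry API:

```go
package main

import (
	"fmt"
	"sync"
)

// tokenSource is a hypothetical stand-in for the guarded bearer token.
type tokenSource struct {
	mx    sync.RWMutex
	token string
}

// get mirrors RoundTrip: copy the token under a read lock, then use the copy.
func (t *tokenSource) get() string {
	t.mx.RLock()
	defer t.mx.RUnlock()
	return t.token
}

// set mirrors refresh: replace the token under the write lock.
func (t *tokenSource) set(tok string) {
	t.mx.Lock()
	defer t.mx.Unlock()
	t.token = tok
}

func main() {
	var ts tokenSource
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ts.set(fmt.Sprintf("token-%d", i)) // concurrent refresh
			_ = ts.get()                       // concurrent RoundTrip
		}(i)
	}
	wg.Wait()
	fmt.Println("final:", ts.get())
}
```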
@@ -190,13 +198,15 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // The basic token exchange is attempted first, falling back to the oauth flow. // If the IdentityToken is set, this indicates that we should start with the oauth flow. func (bt *bearerTransport) refresh(ctx context.Context) error { - auth, err := bt.basic.Authorization() + auth, err := authn.Authorization(ctx, bt.basic) if err != nil { return err } if auth.RegistryToken != "" { + bt.mx.Lock() bt.bearer.RegistryToken = auth.RegistryToken + bt.mx.Unlock() return nil } @@ -212,7 +222,9 @@ func (bt *bearerTransport) refresh(ctx context.Context) error { // Find a token to turn into a Bearer authenticator if response.Token != "" { + bt.mx.Lock() bt.bearer.RegistryToken = response.Token + bt.mx.Unlock() } // If we obtained a refresh token from the oauth flow, use that for refresh() now. @@ -295,7 +307,7 @@ func canonicalAddress(host, scheme string) (address string) { // https://docs.docker.com/registry/spec/auth/oauth/ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { - auth, err := bt.basic.Authorization() + auth, err := authn.Authorization(ctx, bt.basic) if err != nil { return nil, err } @@ -306,7 +318,9 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { } v := url.Values{} + bt.mx.RLock() v.Set("scope", strings.Join(bt.scopes, " ")) + bt.mx.RUnlock() if bt.service != "" { v.Set("service", bt.service) } @@ -362,7 +376,9 @@ func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { client := http.Client{Transport: b} v := u.Query() + bt.mx.RLock() v["scope"] = bt.scopes + bt.mx.RUnlock() v.Set("service", bt.service) u.RawQuery = v.Encode() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 04a3989a..1167cb79 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -45,14 +45,7 @@ type Taggable interface { // Write pushes the provided img to the specified image reference. func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { - o, err := makeOptions(options...) - if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - return newPusher(o).Push(o.context, ref, img) + return Push(ref, img, options...) } // writer writes the elements of an image to a remote image reference. @@ -76,7 +69,7 @@ type writer struct { func makeWriter(ctx context.Context, repo name.Repository, ls []v1.Layer, o *options) (*writer, error) { auth := o.auth if o.keychain != nil { - kauth, err := o.keychain.Resolve(repo) + kauth, err := authn.Resolve(ctx, o.keychain, repo) if err != nil { return nil, err } @@ -656,14 +649,7 @@ func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { // WriteIndex will attempt to push all of the referenced manifests before // attempting to push the ImageIndex, to retain referential integrity. func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { - o, err := makeOptions(options...) - if err != nil { - return err - } - if o.progress != nil { - defer func() { o.progress.Close(rerr) }() - } - return newPusher(o).Push(o.context, ref, ii) + return Push(ref, ii, options...) } // WriteLayer uploads the provided Layer to the specified repo. 
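Aside: with Write and WriteIndex now delegating to the new Push (added in the next hunk), pushing an image, an index, or any other Taggable funnels through one code path. A minimal sketch; the registry reference is a placeholder and running it would attempt a real push:

```go
package main

import (
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// pushImage shows the equivalence: Write(ref, img) is now sugar for
// Push(ref, img), which accepts any Taggable.
func pushImage(img v1.Image) error {
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		return err
	}
	if err := remote.Write(ref, img); err != nil { // delegates to Push
		return err
	}
	return remote.Push(ref, img) // same operation, Taggable-shaped
}

func main() {
	_ = pushImage(empty.Image) // empty.Image is a convenient stand-in
}
```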
@@ -709,5 +695,17 @@ func Put(ref name.Reference, t Taggable, options ...Option) error { if err != nil { return err } + return newPusher(o).Put(o.context, ref, t) +} + +// Push uploads the given Taggable to the specified reference. +func Push(ref name.Reference, t Taggable, options ...Option) (rerr error) { + o, err := makeOptions(options...) + if err != nil { + return err + } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() + } return newPusher(o).Push(o.context, ref, t) } diff --git a/vendor/github.com/in-toto/attestation/LICENSE b/vendor/github.com/in-toto/attestation/LICENSE new file mode 100644 index 00000000..702a3365 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/LICENSE @@ -0,0 +1,13 @@ +Copyright 2021 in-toto Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go new file mode 100644 index 00000000..51654e95 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -0,0 +1,61 @@ +/* +Wrapper APIs for in-toto attestation ResourceDescriptor protos. +*/ + +package v1 + +import ( + "encoding/hex" + "errors" + "fmt" +) + +var ( + ErrIncorrectDigestLength = errors.New("digest has incorrect length") + ErrInvalidDigestEncoding = errors.New("digest is not valid hex-encoded string") + ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") +) + +// Indicates if a given fixed-size hash algorithm is supported by default and returns the algorithm's +// digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. +// +// SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 +func isSupportedFixedSizeAlgorithm(alg string) (bool, int) { + algos := map[string]int{"md5": 16, "sha1": 20, "sha224": 28, "sha512_224": 28, "sha256": 32, "sha512_256": 32, "sha384": 48, "sha512": 64, "sha3_224": 28, "sha3_256": 32, "sha3_384": 48, "sha3_512": 64, "gitCommit": 20, "dirHash": 32} + + size, ok := algos[alg] + return ok, size +} + +func (d *ResourceDescriptor) Validate() error { + // at least one of name, URI or digest are required + if d.GetName() == "" && d.GetUri() == "" && len(d.GetDigest()) == 0 { + return ErrRDRequiredField + } + + if len(d.GetDigest()) > 0 { + for alg, digest := range d.GetDigest() { + + // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md + // check encoding and length for supported algorithms; + // use of custom, unsupported algorithms is allowed and does not generate validation errors.
+ supported, size := isSupportedFixedSizeAlgorithm(alg) + if supported { + // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms + hashBytes, err := hex.DecodeString(digest) + + if err != nil { + return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest) + } + + // check the length of the digest + if len(hashBytes) != size { + return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go new file mode 100644 index 00000000..3e59869b --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v4.24.4 +// source: in_toto_attestation/v1/resource_descriptor.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 ResourceDescriptor. +// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md +// Validation of all fields is left to the users of this proto. +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // Per the Struct protobuf spec, this type corresponds to + // a JSON Object, which is truly a map under the hood. + // So, the Struct a) is still consistent with our specification for + // the `annotations` field, and b) has native support in some language + // bindings making their use easier in implementations. 
+ // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceDescriptor) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *ResourceDescriptor) GetDigest() map[string]string { + if x != nil { + return x.Digest + } + return nil +} + +func (x *ResourceDescriptor) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ResourceDescriptor) GetDownloadLocation() string { + if x != nil { + return x.DownloadLocation + } + return "" +} + +func (x *ResourceDescriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct { + if x != nil { + return x.Annotations + } + return nil +} + +var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x4e, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x44, 0x69, 0x67, 0x65, 
0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x47, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, + 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc +) + +func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData) + }) + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData +} + +var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []interface{}{ + (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor + nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry + 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { 
file_in_toto_attestation_v1_resource_descriptor_proto_init() } +func file_in_toto_attestation_v1_resource_descriptor_proto_init() { + if File_in_toto_attestation_v1_resource_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_resource_descriptor_proto = out.File + file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = nil + file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil + file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go new file mode 100644 index 00000000..f63d5f0d --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -0,0 +1,50 @@ +/* +Wrapper APIs for in-toto attestation Statement layer protos. +*/ + +package v1 + +import "errors" + +const StatementTypeUri = "https://in-toto.io/Statement/v1" + +var ( + ErrInvalidStatementType = errors.New("wrong statement type") + ErrSubjectRequired = errors.New("at least one subject required") + ErrDigestRequired = errors.New("at least one digest required") + ErrPredicateTypeRequired = errors.New("predicate type required") + ErrPredicateRequired = errors.New("predicate object required") +) + +func (s *Statement) Validate() error { + if s.GetType() != StatementTypeUri { + return ErrInvalidStatementType + } + + if s.GetSubject() == nil || len(s.GetSubject()) == 0 { + return ErrSubjectRequired + } + + // check all resource descriptors in the subject + subject := s.GetSubject() + for _, rd := range subject { + if err := rd.Validate(); err != nil { + return err + } + + // v1 statements require the digest to be set in the subject + if len(rd.GetDigest()) == 0 { + return ErrDigestRequired + } + } + + if s.GetPredicateType() == "" { + return ErrPredicateTypeRequired + } + + if s.GetPredicate() == nil { + return ErrPredicateRequired + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go new file mode 100644 index 00000000..a2bd2c2d --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
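Aside: a minimal sketch of building a Statement that satisfies the Validate rules introduced above (correct type URI, a non-empty subject with at least one digest, a predicate type, and a predicate); every field value here is a placeholder:

```go
package main

import (
	"fmt"

	v1 "github.com/in-toto/attestation/go/v1"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	pred, err := structpb.NewStruct(map[string]interface{}{"builder": "example"})
	if err != nil {
		panic(err)
	}
	st := &v1.Statement{
		Type: v1.StatementTypeUri, // must be "https://in-toto.io/Statement/v1"
		Subject: []*v1.ResourceDescriptor{{
			Name: "app.tar.gz",
			// Subjects need at least one digest; supported algorithms
			// are checked for hex encoding and exact length.
			Digest: map[string]string{
				"sha256": "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9",
			},
		}},
		PredicateType: "https://example.com/predicate/v1",
		Predicate:     pred,
	}
	fmt.Println(st.Validate()) // <nil>
}
```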
+// versions: +// protoc-gen-go v1.34.1 +// protoc v4.24.4 +// source: in_toto_attestation/v1/statement.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 Statement. +// https://github.com/in-toto/attestation/tree/main/spec/v1 +// Validation of all fields is left to the users of this proto. +type Statement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Expected to always be "https://in-toto.io/Statement/v1" + Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` + Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` + PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` + Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Statement) Reset() { + *x = Statement{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Statement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statement) ProtoMessage() {} + +func (x *Statement) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statement.ProtoReflect.Descriptor instead. 
+func (*Statement) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0} +} + +func (x *Statement) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Statement) GetSubject() []*ResourceDescriptor { + if x != nil { + return x.Subject + } + return nil +} + +func (x *Statement) GetPredicateType() string { + if x != nil { + return x.PredicateType + } + return "" +} + +func (x *Statement) GetPredicate() *structpb.Struct { + if x != nil { + return x.Predicate + } + return nil +} + +var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_statement_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, + 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x30, + 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc4, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x13, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x47, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_statement_proto_rawDescData = file_in_toto_attestation_v1_statement_proto_rawDesc +) + +func 
file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_statement_proto_rawDescData) + }) + return file_in_toto_attestation_v1_statement_proto_rawDescData +} + +var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_in_toto_attestation_v1_statement_proto_goTypes = []interface{}{ + (*Statement)(nil), // 0: in_toto_attestation.v1.Statement + (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor + 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_statement_proto_init() } +func file_in_toto_attestation_v1_statement_proto_init() { + if File_in_toto_attestation_v1_statement_proto != nil { + return + } + file_in_toto_attestation_v1_resource_descriptor_proto_init() + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_statement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_statement_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_statement_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_statement_proto = out.File + file_in_toto_attestation_v1_statement_proto_rawDesc = nil + file_in_toto_attestation_v1_statement_proto_goTypes = nil + file_in_toto_attestation_v1_statement_proto_depIdxs = nil +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4c28dff4..4528059c 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,9 +1,8 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 builds: - @@ -32,7 +31,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2d" binary: s2d @@ -59,7 +57,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2sx" binary: s2sx @@ -87,7 +84,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble archives: - @@ -103,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583..de264c85 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,51 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille 
in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -31,6 +76,10 @@ This package provides various compression algorithms. * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -53,7 +102,7 @@ This package provides various compression algorithms. * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -69,6 +118,7 @@ This package provides various compression algorithms. * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -107,7 +157,7 @@ This package provides various compression algorithms. * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -310,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -489,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. @@ -536,6 +586,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. @@ -554,7 +606,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. 
-// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a..2754bac6 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853f..5a4412f9 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce60..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { @@ -595,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925a..32a7f401 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) 
encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a24097..6a5a2988 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
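Aside: DecodeAndStrip, documented above, parses the frame header and hands back the bytes that follow it, so callers can inspect metadata and keep decoding from the remainder. A minimal round-trip sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a small frame to inspect.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	frame := enc.EncodeAll([]byte("hello world"), nil)

	// Decode the header and keep the remaining bytes for further processing.
	var h zstd.Header
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	fmt.Printf("header=%d bytes, singleSegment=%v, remaining=%d bytes\n",
		h.HeaderSize, h.SingleSegment, len(remain))
}
```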
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21..bbca1723 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. 
// -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c81a1535..4613724e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -201,14 +213,6 @@ encodeLoop: if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } // Try to quick reject if we already have a long match. if m.length > 16 { left := len(src) - int(m.s+m.length) @@ -227,8 +231,10 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if true { + if m.rep <= 0 { // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
tMin := s - e.maxMatchOff if tMin < 0 { tMin = 0 @@ -239,7 +245,14 @@ encodeLoop: l++ } } - + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } cand := match{offset: offset, s: s, length: l, rep: rep} cand.estBits(bitsPerByte) if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { @@ -336,24 +349,31 @@ encodeLoop: } if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) } } // We have a match, we can store the forward value + s = best.s if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } addLiterals(&seq, best.s) // Repeat. If bit 4 is set, this is a non-lit repeat. seq.offset = uint32(best.rep & 3) if debugSequences { - println("repeat sequence", seq, "next s:", s) + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) } blk.sequences = append(blk.sequences, seq) @@ -396,7 +416,6 @@ encodeLoop: // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. - s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -168,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -199,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -230,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -259,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -697,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -727,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -761,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -790,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..8f8223cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. 
- // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf8192..20671dcb 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. +// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7..e47af66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed4..667ca067 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. 
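Two of the encoder changes above are visible to callers: operations after Close now surface the new ErrEncoderClosed (with Flush and a repeated Close tolerated as no-ops), and the Better/Best levels now default to an 8 MB window, so larger windows must be requested explicitly. A combined sketch, assuming the vendored zstd package:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// SpeedBestCompression now defaults to an 8MB window; request the old
	// 32MB window explicitly if the extra memory use is acceptable.
	enc, err := zstd.NewWriter(&buf,
		zstd.WithEncoderLevel(zstd.SpeedBestCompression),
		zstd.WithWindowSize(32<<20),
	)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// After Close, Write fails with the new sentinel error ...
	_, err = enc.Write([]byte("more"))
	fmt.Println(errors.Is(err, zstd.ErrEncoderClosed)) // true

	// ... while Flush and a second Close are tolerated and return nil.
	fmt.Println(enc.Flush(), enc.Close()) // <nil> <nil>
}
```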
case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe..8adfebb0 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", 
errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b9972..f5591fa1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1826,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // 
Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2391,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -2914,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3581,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update 
Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc73..066bef2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go index 1d7e2408..d5e7a872 100644 --- a/vendor/github.com/letsencrypt/boulder/core/challenges.go +++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go @@ -10,27 +10,23 @@ func newChallenge(challengeType AcmeChallenge, token string) Challenge { } } -// HTTPChallenge01 constructs a random http-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// HTTPChallenge01 constructs a http-01 challenge. func HTTPChallenge01(token string) Challenge { return newChallenge(ChallengeTypeHTTP01, token) } -// DNSChallenge01 constructs a random dns-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// DNSChallenge01 constructs a dns-01 challenge. func DNSChallenge01(token string) Challenge { return newChallenge(ChallengeTypeDNS01, token) } -// TLSALPNChallenge01 constructs a random tls-alpn-01 challenge. If token is empty a random token -// will be generated, otherwise the provided token is used. +// TLSALPNChallenge01 constructs a tls-alpn-01 challenge. func TLSALPNChallenge01(token string) Challenge { return newChallenge(ChallengeTypeTLSALPN01, token) } -// NewChallenge constructs a random challenge of the given kind. It returns an -// error if the challenge type is unrecognized. If token is empty a random token -// will be generated, otherwise the provided token is used. +// NewChallenge constructs a challenge of the given kind. It returns an +// error if the challenge type is unrecognized. func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) { switch kind { case ChallengeTypeHTTP01: diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go index 003329c3..59b55a3f 100644 --- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go +++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go @@ -7,7 +7,7 @@ import ( // PolicyAuthority defines the public interface for the Boulder PA // TODO(#5891): Move this interface to a more appropriate location. 
type PolicyAuthority interface { - WillingToIssueWildcards([]identifier.ACMEIdentifier) error + WillingToIssue([]string) error ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error) ChallengeTypeEnabled(AcmeChallenge) bool CheckAuthz(*Authorization) error diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index b52f0f5e..c01f551a 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/go-jose/go-jose/v4" "golang.org/x/crypto/ocsp" - "gopkg.in/go-jose/go-jose.v2" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" @@ -119,7 +119,7 @@ type Registration struct { } // ValidationRecord represents a validation attempt against a specific URL/hostname -// and the IP addresses that were resolved and used +// and the IP addresses that were resolved and used. type ValidationRecord struct { // SimpleHTTP only URL string `json:"url,omitempty"` @@ -144,20 +144,17 @@ type ValidationRecord struct { // ... // } AddressesTried []net.IP `json:"addressesTried,omitempty"` -} - -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil + // ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the + // lookup for AddressUsed. During recursive A and AAAA lookups, a record may + // instead look like A:host:port or AAAA:host:port + ResolverAddrs []string `json:"resolverAddrs,omitempty"` + // UsedRSAKEX is a *temporary* addition to the validation record, so we can + // see how many servers that we reach out to during HTTP-01 and TLS-ALPN-01 + // validation are only willing to negotiate RSA key exchange mechanisms. The + // field is not included in the serialized json to avoid cluttering the + // database and log lines. + // TODO(#7321): Remove this when we have collected sufficient data. + UsedRSAKEX bool `json:"-"` } // Challenge is an aggregate of all data needed for any challenges. @@ -166,38 +163,38 @@ func looksLikeKeyAuthorization(str string) error { // challenge, we just throw all the elements into one bucket, // together with the common metadata elements. type Challenge struct { - // The type of challenge + // Type is the type of challenge encoded in this object. Type AcmeChallenge `json:"type"` - // The status of this challenge - Status AcmeStatus `json:"status,omitempty"` + // URL is the URL to which a response can be posted. Required for all types. + URL string `json:"url,omitempty"` - // Contains the error that occurred during challenge validation, if any - Error *probs.ProblemDetails `json:"error,omitempty"` + // Status is the status of this challenge. Required for all types. + Status AcmeStatus `json:"status,omitempty"` - // A URI to which a response can be POSTed - URI string `json:"uri,omitempty"` + // Validated is the time at which the server validated the challenge. Required + // if status is valid. 
+ Validated *time.Time `json:"validated,omitempty"` - // For the V2 API the "URI" field is deprecated in favour of URL. - URL string `json:"url,omitempty"` + // Error contains the error that occurred during challenge validation, if any. + // If set, the Status must be "invalid". + Error *probs.ProblemDetails `json:"error,omitempty"` - // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges + // Token is a random value that uniquely identifies the challenge. It is used + // by all current challenges (http-01, tls-alpn-01, and dns-01). Token string `json:"token,omitempty"` - // The expected KeyAuthorization for validation of the challenge. Populated by - // the RA prior to passing the challenge to the VA. For legacy reasons this - // field is called "ProvidedKeyAuthorization" because it was initially set by - // the content of the challenge update POST from the client. It is no longer - // set that way and should be renamed to "KeyAuthorization". - // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`. + // ProvidedKeyAuthorization used to carry the expected key authorization from + // the RA to the VA. However, since this field is never presented to the user + // via the ACME API, it should not be on this type. + // + // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead. + // TODO(#7514): Remove this. ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"` // Contains information about URLs used or redirected to and IPs resolved and // used ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` - // The time at which the server validated the challenge. Required by - // RFC8555 if status is valid. - Validated *time.Time `json:"validated,omitempty"` } // ExpectedKeyAuthorization computes the expected KeyAuthorization value for @@ -225,6 +222,8 @@ func (ch Challenge) RecordsSane() bool { switch ch.Type { case ChallengeTypeHTTP01: for _, rec := range ch.ValidationRecord { + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil || len(rec.AddressesResolved) == 0 { return false @@ -237,6 +236,8 @@ func (ch Challenge) RecordsSane() bool { if ch.ValidationRecord[0].URL != "" { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 { return false @@ -245,6 +246,8 @@ func (ch Challenge) RecordsSane() bool { if len(ch.ValidationRecord) > 1 { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" { return false } @@ -256,43 +259,18 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. 
-func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { +// CheckPending ensures that a challenge object is pending and has a token. +// This is used before offering the challenge to the client, and before actually +// validating a challenge. +func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") + return fmt.Errorf("challenge is not pending") } - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") + if !looksLikeAToken(ch.Token) { + return fmt.Errorf("token is missing or malformed") } + return nil } @@ -483,6 +461,12 @@ type SuggestedWindow struct { End time.Time `json:"end"` } +// IsWithin returns true if the given time is within the suggested window, +// inclusive of the start time and exclusive of the end time. +func (window SuggestedWindow) IsWithin(now time.Time) bool { + return !now.Before(window.Start) && now.Before(window.End) +} + // RenewalInfo is a type which is exposed to clients which query the renewalInfo // endpoint specified in draft-aaron-ari. type RenewalInfo struct { diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index d7fe0266..641521f1 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -25,7 +25,9 @@ import ( "time" "unicode" - "gopkg.in/go-jose/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const Unspecified = "Unspecified" @@ -74,9 +76,9 @@ func NewToken() string { var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) -// LooksLikeAToken checks whether a string represents a 32-octet value in +// looksLikeAToken checks whether a string represents a 32-octet value in // the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { +func looksLikeAToken(token string) bool { return tokenFormat.MatchString(token) } @@ -92,8 +94,7 @@ func Fingerprint256(data []byte) string { type Sha256Digest [sha256.Size]byte -// KeyDigest produces a Base64-encoded SHA256 digest of a -// provided public key. +// KeyDigest produces the SHA256 digest of a provided public key. func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { switch t := key.(type) { case *jose.JSONWebKey: @@ -212,10 +213,83 @@ func IsAnyNilOrZero(vals ...interface{}) bool { switch v := val.(type) { case nil: return true + case bool: + if !v { + return true + } + case string: + if v == "" { + return true + } + case []string: + if len(v) == 0 { + return true + } + case byte: + // Byte is an alias for uint8 and will cover that case. 
+ if v == 0 { + return true + } case []byte: if len(v) == 0 { return true } + case int: + if v == 0 { + return true + } + case int8: + if v == 0 { + return true + } + case int16: + if v == 0 { + return true + } + case int32: + if v == 0 { + return true + } + case int64: + if v == 0 { + return true + } + case uint: + if v == 0 { + return true + } + case uint16: + if v == 0 { + return true + } + case uint32: + if v == 0 { + return true + } + case uint64: + if v == 0 { + return true + } + case float32: + if v == 0 { + return true + } + case float64: + if v == 0 { + return true + } + case time.Time: + if v.IsZero() { + return true + } + case *timestamppb.Timestamp: + if v == nil || v.AsTime().IsZero() { + return true + } + case *durationpb.Duration: + if v == nil || v.AsDuration() == time.Duration(0) { + return true + } default: if reflect.ValueOf(v).IsZero() { return true diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index 087a0181..04a075d3 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -39,6 +39,9 @@ var ( ) type Config struct { + // AllowedKeys enables or disables specific key algorithms and sizes. If + // nil, defaults to just those keys allowed by the Let's Encrypt CPS. + AllowedKeys *AllowedKeys // WeakKeyFile is the path to a JSON file containing truncated modulus hashes // of known weak RSA keys. If this config value is empty, then RSA modulus // hash checking will be disabled. @@ -54,6 +57,40 @@ type Config struct { FermatRounds int } +// AllowedKeys is a map of six specific key algorithm and size combinations to +// booleans indicating whether keys of that type are considered good. +type AllowedKeys struct { + // Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple + // of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes + // Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which + // have a known method to easily compute their private key, such as Debian Weak + // Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at + // common key sizes, so we restrict all issuance to those common key sizes. + RSA2048 bool + RSA3072 bool + RSA4096 bool + // Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid + // points on the NIST P-256, P-384, or P-521 elliptic curves. + ECDSAP256 bool + ECDSAP384 bool + ECDSAP521 bool +} + +// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's +// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3076, RSA +// 4096, ECDSA 256 and ECDSA P384. +// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate +// If this is ever changed, the CP/CPS MUST be changed first. +func LetsEncryptCPS() AllowedKeys { + return AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + } +} + // ErrBadKey represents an error with a key. It is distinct from the various // ways in which an ACME request can have an erroneous key (BadPublicKeyError, // BadCSRError) because this library is used to check both JWS signing keys and @@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error) // KeyPolicy determines which types of key may be used with various boulder // operations. 
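The two core helpers touched above are small enough to demonstrate directly. A minimal sketch, assuming the vendored github.com/letsencrypt/boulder/core package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	now := time.Now()

	// SuggestedWindow.IsWithin: start is inclusive, end is exclusive.
	window := core.SuggestedWindow{Start: now, End: now.Add(time.Hour)}
	fmt.Println(window.IsWithin(now))                // true
	fmt.Println(window.IsWithin(now.Add(time.Hour))) // false

	// IsAnyNilOrZero now recognizes the zero value of many more types
	// without falling through to reflection.
	fmt.Println(core.IsAnyNilOrZero(1, "a", now)) // false
	fmt.Println(core.IsAnyNilOrZero(1, "", now))  // true: empty string
}
```

Similarly, the goodkey rework replaces the three Allow* booleans with the AllowedKeys set and the NewPolicy constructor that follows below. A hypothetical sketch; it assumes the package's existing GoodKey(ctx, key) entry point, which is not part of this patch:

```go
package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/letsencrypt/boulder/goodkey"
)

func main() {
	// An empty config falls back to the LetsEncryptCPS() allow-list
	// (RSA 2048/3072/4096, ECDSA P-256/P-384).
	policy, err := goodkey.NewPolicy(&goodkey.Config{}, nil)
	if err != nil {
		panic(err)
	}

	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Hypothetical usage: GoodKey is assumed to be the package's
	// existing validation entry point.
	fmt.Println(policy.GoodKey(context.Background(), key.Public())) // <nil>
}
```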
type KeyPolicy struct { - AllowRSA bool // Whether RSA keys should be allowed. - AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed. - AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed. - weakRSAList *WeakRSAKeys - blockedList *blockedKeys - fermatRounds int - blockedCheck BlockedKeyCheckFunc + allowedKeys AllowedKeys + weakRSAList *WeakRSAKeys + blockedList *blockedKeys + fermatRounds int + blockedCheck BlockedKeyCheckFunc } -// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384. -// weakKeyFile contains the path to a JSON file containing truncated modulus -// hashes of known weak RSA keys. If this argument is empty RSA modulus hash -// checking will be disabled. blockedKeyFile contains the path to a YAML file -// containing Base64 encoded SHA256 hashes of pkix subject public keys that -// should be blocked. If this argument is empty then no blocked key checking is -// performed. -func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { +// NewPolicy returns a key policy based on the given configuration, with sane +// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys +// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those +// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization +// is disabled. +func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { + if config == nil { + config = &Config{} + } kp := KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - blockedCheck: bkc, + blockedCheck: bkc, + } + if config.AllowedKeys == nil { + kp.allowedKeys = LetsEncryptCPS() + } else { + kp.allowedKeys = *config.AllowedKeys } if config.WeakKeyFile != "" { keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) @@ -264,44 +302,30 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { // Simply use a whitelist for now. params := c.Params() switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): + case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params(): + return nil + case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params(): return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): + case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params(): return nil default: return badKey("ECDSA curve %v not allowed", params.Name) } } -// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple -// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes -// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which -// have a known method to easily compute their private key, such as Debian Weak -// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at -// common key sizes, so we restrict all issuance to those common key sizes. 
-var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - // GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") +func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error { + modulus := key.N + + err := policy.goodRSABitLen(key) + if err != nil { + return err } + if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { return badKey("key is on a known weak RSA key list") } - modulus := key.N - - // See comment on acceptableRSAKeySizes above. - modulusBitLen := modulus.BitLen() - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - // Rather than support arbitrary exponents, which significantly increases // the size of the key space we allow, we restrict E to the defacto standard // RSA exponent 65537. There is no specific standards document that specifies @@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { return nil } +func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error { + // See comment on AllowedKeys above. + modulusBitLen := key.N.BitLen() + switch { + case modulusBitLen == 2048 && policy.allowedKeys.RSA2048: + return nil + case modulusBitLen == 3072 && policy.allowedKeys.RSA3072: + return nil + case modulusBitLen == 4096 && policy.allowedKeys.RSA4096: + return nil + default: + return badKey("key size not supported: %d", modulusBitLen) + } +} + // Returns true iff integer i is divisible by any of the primes in smallPrimes. // // Short circuits; execution time is dependent on i. Do not use this on secret @@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { b2 := new(big.Int) b2.Mul(a, a).Sub(b2, n) - for i := 0; i < rounds; i++ { + for range rounds { // To see if b2 is a perfect square, we take its square root, square that, // and check to see if we got the same result back. bb.Sqrt(b2).Mul(bb, bb) diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go index 2cc76623..ec6c272a 100644 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go @@ -20,6 +20,8 @@ const ( BadRevocationReasonProblem = ProblemType("badRevocationReason") BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") CAAProblem = ProblemType("caa") + // ConflictProblem is a problem type that is not defined in RFC8555. + ConflictProblem = ProblemType("conflict") ConnectionProblem = ProblemType("connection") DNSProblem = ProblemType("dns") InvalidContactProblem = ProblemType("invalidContact") @@ -290,11 +292,11 @@ func Canceled(detail string, a ...any) *ProblemDetails { } } -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict +// Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict // status code. 
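Since callers often branch on the problem type, the retyping is directly observable. A minimal sketch, assuming the vendored github.com/letsencrypt/boulder/probs package:

```go
package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/probs"
)

func main() {
	// Conflict responses are now typed "conflict" rather than "malformed".
	p := probs.Conflict("order is already finalized")
	fmt.Println(p.Type, p.HTTPStatus) // conflict 409
}
```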
func Conflict(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: ConflictProblem, Detail: detail, HTTPStatus: http.StatusConflict, } diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24..00000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. 
- -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. - -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. 
- * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. - -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). 
- -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure beha[ior configurable (log.Fatal, panic](https://github.com/magiconair/properties/tree/vior configurable (log.Fatal, panic) - custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet... functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index e2edda02..4872685f 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,12 +1,9 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) # Overview -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - properties is a Go library for reading and writing properties files. 
It supports reading from multiple files or URLs and Spring style recursive @@ -99,30 +96,3 @@ $ go get -u github.com/magiconair/properties ## ToDo * Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go index 8e6aa441..f5e252f8 100644 --- a/vendor/github.com/magiconair/properties/decode.go +++ b/vendor/github.com/magiconair/properties/decode.go @@ -189,12 +189,12 @@ func dec(p *Properties, key string, def *string, opts map[string]string, v refle for i := 0; i < v.NumField(); i++ { fv := v.Field(i) fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } if fk == "-" { continue } + if !fv.CanSet() { + return fmt.Errorf("cannot set %s", t.Field(i).Name) + } if key != "" { fk = key + "." + fk } diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index 635368dc..6567e0c7 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -6,7 +6,7 @@ package properties import ( "fmt" - "io/ioutil" + "io" "net/http" "os" "strings" @@ -52,6 +52,15 @@ func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { return l.loadBytes(buf, l.Encoding) } +// LoadReader reads an io.Reader into a Properties struct. +func (l *Loader) LoadReader(r io.Reader) (*Properties, error) { + if buf, err := io.ReadAll(r); err != nil { + return nil, err + } else { + return l.loadBytes(buf, l.Encoding) + } +} + // LoadAll reads the content of multiple URLs or files in the given order into // a Properties struct. If IgnoreMissing is true then a 404 status code or // missing file will not be reported as error. Encoding sets the encoding for @@ -91,7 +100,7 @@ func (l *Loader) LoadAll(names []string) (*Properties, error) { // If IgnoreMissing is true then a missing file will not be // reported as error. func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if l.IgnoreMissing && os.IsNotExist(err) { LogPrintf("properties: %s not found. skipping", filename) @@ -126,7 +135,7 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("properties: %s error reading response. 
%s", url, err) } @@ -185,6 +194,12 @@ func LoadFile(filename string, enc Encoding) (*Properties, error) { return l.LoadAll([]string{filename}) } +// LoadReader reads an io.Reader into a Properties struct. +func LoadReader(r io.Reader, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadReader(r) +} + // LoadFiles reads multiple files in the given order into // a Properties struct. If 'ignoreMissing' is true then // non-existent files will not be reported as error. @@ -224,6 +239,12 @@ func MustLoadString(s string) *Properties { return must(LoadString(s)) } +// MustLoadSReader reads an io.Reader into a Properties struct and +// panics on error. +func MustLoadReader(r io.Reader, enc Encoding) *Properties { + return must(LoadReader(r, enc)) +} + // MustLoadFile reads a file into a Properties struct and // panics on error. func MustLoadFile(filename string, enc Encoding) *Properties { diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index c7582349..ae634d1c 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + ## 1.5.0 * New option `IgnoreUntaggedFields` to ignore decoding to any fields diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 3a754ca7..c1f99da0 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -271,7 +271,11 @@ func TextUnmarshallerHookFunc() DecodeHookFuncType { if !ok { return data, nil } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { return nil, err } return result, nil diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 1efb22ac..7581806a 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -458,7 +458,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) + return fmt.Errorf("error decoding '%s': %w", name, err) } } @@ -1123,6 +1123,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. 
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) } // Accumulate any errors diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore index a69e2b0e..4b7c4eda 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -3,4 +3,5 @@ fuzz/ cmd/tomll/tomll cmd/tomljson/tomljson cmd/tomltestgen/tomltestgen -dist \ No newline at end of file +dist +tests/ diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 1d8b69e6..ec52857a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md index 04dd12bc..96ecf9e2 100644 --- a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -165,25 +165,22 @@ Checklist: ### New release -1. Decide on the next version number. Use semver. -2. Generate release notes using [`gh`][gh]. Example: +1. Decide on the next version number. Use semver. Review commits since last + version to assess. +2. Tag release. For example: ``` -$ gh api -X POST \ - -F tag_name='v2.0.0-beta.5' \ - -F target_commitish='v2' \ - -F previous_tag_name='v2.0.0-beta.4' \ - --jq '.body' \ - repos/pelletier/go-toml/releases/generate-notes +git checkout v2 +git pull +git tag v2.2.0 +git push --tags ``` -3. Look for "Other changes". That would indicate a pull request not labeled - properly. Tweak labels and pull request titles until changelog looks good for - users. -4. [Draft new release][new-release]. -5. Fill tag and target with the same value used to generate the changelog. -6. Set title to the new tag value. -7. Paste the generated changelog. -8. Check "create discussion", in the "Releases" category. -9. Check pre-release if new version is an alpha or beta. +3. CI automatically builds a draft Github release. Review it and edit as + necessary. Look for "Other changes". That would indicate a pull request not + labeled properly. Tweak labels and pull request titles until changelog looks + good for users. +4. Check "create discussion" box, in the "Releases" category. +5. If new version is an alpha or beta only, check pre-release box. 
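The mapstructure `decodeSlice` change above (GH-266) truncates a reused, non-nil destination slice to the input length instead of leaving stale trailing elements behind. A small sketch of the behavior it fixes; the struct and values here are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type config struct {
	Tags []string
}

func main() {
	// Destination slice is non-nil and longer than the input.
	out := config{Tags: []string{"a", "b", "c"}}
	in := map[string]interface{}{"tags": []string{"x"}}

	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}

	// Without the truncation above, the reused backing slice kept its
	// stale tail ([x b c]); with it, only the decoded input remains.
	fmt.Println(out.Tags) // [x]
}
```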
+ [issues-tracker]: https://github.com/pelletier/go-toml/issues [bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index 63b92f3b..0755e556 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -98,9 +98,9 @@ Given the following struct, let's see how to read it and write it as TOML: ```go type MyConfig struct { - Version int - Name string - Tags []string + Version int + Name string + Tags []string } ``` @@ -119,7 +119,7 @@ tags = ["go", "toml"] var cfg MyConfig err := toml.Unmarshal([]byte(doc), &cfg) if err != nil { - panic(err) + panic(err) } fmt.Println("version:", cfg.Version) fmt.Println("name:", cfg.Name) @@ -140,14 +140,14 @@ as a TOML document: ```go cfg := MyConfig{ - Version: 2, - Name: "go-toml", - Tags: []string{"go", "toml"}, + Version: 2, + Name: "go-toml", + Tags: []string{"go", "toml"}, } b, err := toml.Marshal(cfg) if err != nil { - panic(err) + panic(err) } fmt.Println(string(b)) @@ -175,17 +175,17 @@ the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable. Execution time speedup compared to other Go TOML libraries:
- <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
- <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
- <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr>
- <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr>
- <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr>
- <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr>
- <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr>
+ <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+ <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+ <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+ <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+ <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+ <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+ <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
See more

The table above has the results of the most common use-cases. The table below @@ -193,22 +193,22 @@ contains the results of all benchmarks, including unrealistic ones. It is provided for completeness.

- <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
- <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr>
- <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr>
- <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr>
- <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr>
- <tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr>
- <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr>
- <tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr>
- <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr>
- <tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr>
- <tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr>
- <tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr>
+ <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+ <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+ <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+ <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+ <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+ <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+ <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+ <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+ <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+ <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+ <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+ <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>

This table can be generated with `./ci.sh benchmark -a -html`.

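The multipliers in these tables come from Go's standard benchmark harness. As a rough sketch (not the project's actual benchmark code; the fixture path is hypothetical), a benchmark of the shape named in the rows looks like this:

```go
package toml_test

import (
	"os"
	"testing"

	"github.com/pelletier/go-toml/v2"
)

func BenchmarkUnmarshalReferenceFileMap(b *testing.B) {
	doc, err := os.ReadFile("testdata/reference.toml") // hypothetical fixture
	if err != nil {
		b.Skip("fixture not available:", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var v map[string]interface{}
		if err := toml.Unmarshal(doc, &v); err != nil {
			b.Fatal(err)
		}
	}
}
```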
@@ -233,24 +233,24 @@ Go-toml provides three handy command line tools: * `tomljson`: Reads a TOML file and outputs its JSON representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest - $ tomljson --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` * `jsontoml`: Reads a JSON file and outputs a TOML representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest - $ jsontoml --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` * `tomll`: Lints and reformats a TOML file. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest - $ tomll --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` ### Docker image @@ -261,7 +261,7 @@ Those tools are also available as a [Docker image][docker]. For example, to use docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml ``` -Multiple versions are availble on [ghcr.io][docker]. +Multiple versions are available on [ghcr.io][docker]. [docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml @@ -293,16 +293,16 @@ element in the interface to decode the object. For example: ```go type inner struct { - B interface{} + B interface{} } type doc struct { - A interface{} + A interface{} } d := doc{ - A: inner{ - B: "Before", - }, + A: inner{ + B: "Before", + }, } data := ` @@ -341,7 +341,7 @@ contained in the doc is superior to the capacity of the array. For example: ```go type doc struct { - A [2]string + A [2]string } d := doc{} err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) @@ -565,10 +565,11 @@ complete solutions exist out there. ## Versioning -Go-toml follows [Semantic Versioning](https://semver.org). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). +Except for parts explicitly marked otherwise, go-toml follows [Semantic +Versioning](https://semver.org). The supported version of +[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this +document. The last two major versions of Go are supported (see [Go Release +Policy](https://golang.org/doc/devel/release.html#policy)). ## License diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md index b2f21cfc..d4d554fd 100644 --- a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -2,9 +2,6 @@ ## Supported Versions -Use this section to tell people about which versions of your project are -currently being supported with security updates. - | Version | Supported | | ---------- | ------------------ | | Latest 2.x | :white_check_mark: | diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh index 9ae8b753..86217a9b 100644 --- a/vendor/github.com/pelletier/go-toml/v2/ci.sh +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -77,7 +77,7 @@ cover() { pushd "$dir" go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
- cat coverage.out.tmp | grep -v fuzz | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out + grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out go tool cover -func=coverage.out echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 popd @@ -152,7 +152,7 @@ bench() { fi export GOMAXPROCS=2 - nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" popd if [ "${branch}" != "HEAD" ]; then @@ -161,10 +161,12 @@ bench() { } fmktemp() { - if mktemp --version|grep GNU >/dev/null; then - mktemp --suffix=-$1; + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 else - mktemp -t $1; + # BSD + mktemp -t $1 fi } @@ -184,12 +186,14 @@ with open(sys.argv[1]) as f: lines.append(line.split(',')) results = [] -for line in reversed(lines[1:]): +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue v2 = float(line[1]) results.append([ line[0].replace("-32", ""), "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[5])/v2), # bs + "%.1fx" % (float(line[7])/v2), # bs ]) # move geomean to the end results.append(results[0]) @@ -260,10 +264,10 @@ benchmark() { if [ "$1" = "-html" ]; then tmpcsv=`fmktemp csv` - benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv benchstathtml $tmpcsv else - benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt fi rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index 40e23f83..76df2d5b 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -57,7 +57,11 @@ type SeenTracker struct { currentIdx int } -var pool sync.Pool +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} func (s *SeenTracker) reset() { // Always contains a root element at index 0. @@ -149,8 +153,9 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are -// consistent. -func (s *SeenTracker) CheckExpression(node *unstable.Node) error { +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. 
+func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { if s.entries == nil { s.reset() } @@ -166,7 +171,7 @@ func (s *SeenTracker) CheckExpression(node *unstable.Node) error { } } -func (s *SeenTracker) checkTable(node *unstable.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -192,7 +197,7 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx @@ -201,25 +206,27 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) + first := false if idx >= 0 { kind := s.entries[idx].kind if kind != tableKind { - return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) } if s.entries[idx].explicit { - return fmt.Errorf("toml: table %s already exists", string(k)) + return false, fmt.Errorf("toml: table %s already exists", string(k)) } s.entries[idx].explicit = true } else { idx = s.create(parentIdx, k, tableKind, true, false) + first = true } s.currentIdx = idx - return nil + return first, nil } -func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -242,7 +249,7 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } @@ -252,22 +259,23 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) - if idx >= 0 { + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { kind := s.entries[idx].kind if kind != arrayTableKind { - return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) } s.clear(idx) - } else { - idx = s.create(parentIdx, k, arrayTableKind, true, false) } s.currentIdx = idx - return nil + return firstTime, nil } -func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { parentIdx := s.currentIdx it := node.Key() @@ -281,11 +289,11 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { } else { entry := s.entries[idx] if it.IsLast() { - return fmt.Errorf("toml: key %s is already defined", string(k)) + return false, fmt.Errorf("toml: key %s is already defined", string(k)) } else if entry.kind != tableKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } else if entry.explicit { - return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + return false, fmt.Errorf("toml: cannot redefine table %s that has 
already been explicitly defined", string(k)) } } @@ -303,45 +311,39 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { return s.checkArray(value) } - return nil + return false, nil } -func (s *SeenTracker) checkArray(node *unstable.Node) error { +func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { case unstable.InlineTable: - err := s.checkInlineTable(n) + first, err = s.checkInlineTable(n) if err != nil { - return err + return false, err } case unstable.Array: - err := s.checkArray(n) + first, err = s.checkArray(n) if err != nil { - return err + return false, err } } } - return nil + return first, nil } -func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { - if pool.New == nil { - pool.New = func() interface{} { - return &SeenTracker{} - } - } - +func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { s = pool.Get().(*SeenTracker) s.reset() it := node.Children() for it.Next() { n := it.Node() - err := s.checkKeyValue(n) + first, err = s.checkKeyValue(n) if err != nil { - return err + return false, err } } @@ -352,5 +354,5 @@ func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { // redefinition of its keys: check* functions cannot walk into // a value. pool.Put(s) - return nil + return first, nil } diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 6fe78533..161acd93 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -3,11 +3,12 @@ package toml import ( "bytes" "encoding" + "encoding/json" "fmt" "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -37,10 +38,11 @@ type Encoder struct { w io.Writer // global settings - tablesInline bool - arraysMultiline bool - indentSymbol string - indentTables bool + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool + marshalJsonNumbers bool } // NewEncoder returns a new Encoder that writes to w. @@ -87,6 +89,17 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { return enc } +// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// float or integer instead of relying on TextMarshaler to emit a string. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + // Encode writes a TOML representation of v to the stream. // // If v cannot be represented to TOML it returns an error. @@ -252,10 +265,22 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e return append(b, x.String()...), nil case LocalDateTime: return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. 
+ return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -606,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -643,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -707,6 +744,8 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) } continue } else { @@ -924,7 +963,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } @@ -998,6 +1037,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. scratch = enc.commented(ctx.commented, scratch) + if enc.indentTables { + scratch = enc.indent(ctx.indent, scratch) + } + scratch = append(scratch, "[["...) for i, k := range ctx.parentKey { diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 868c74c1..c3df8bee 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. 
func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -35,6 +33,9 @@ type Decoder struct { // global settings strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool } // NewDecoder creates a new Decoder that will read from r. @@ -54,6 +55,24 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { return d } +// EnableUnmarshalerInterface enables the unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightforward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + // Decode the whole content of r into v. // // By default, values in the document that don't exist in the target Go value @@ -96,26 +115,25 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, + unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -127,6 +145,10 @@ type decoder struct { // need to be skipped. skipUntilTable bool + // Flag indicating that the current array/slice table should be cleared because + // it is the first encounter of an array table. + clearArrayTable bool + // Tracks position in Go arrays. // This is used when decoding [[array tables]] into Go arrays. Given array // tables are separate TOML expression, we need to keep track of where we @@ -139,6 +161,9 @@ type decoder struct { // Strict mode strict strict + // Flag that enables/disables unmarshaler interface. + unmarshalerInterface bool + // Current context for the error.
errorContext *errorContext } @@ -246,9 +271,10 @@ Rules for the unmarshal code: func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { var x reflect.Value var err error + var first bool // used to clear array tables on first use if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { - err = d.seen.CheckExpression(expr) + first, err = d.seen.CheckExpression(expr) if err != nil { return err } @@ -267,6 +293,7 @@ func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) err case unstable.ArrayTable: d.skipUntilTable = false d.strict.EnterArrayTable(expr) + d.clearArrayTable = first x, err = d.handleArrayTable(expr.Key(), v) default: panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) @@ -307,6 +334,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec reflect.Copy(nelem, elem) elem = nelem } + if d.clearArrayTable && elem.Len() > 0 { + elem.SetLen(0) + d.clearArrayTable = false + } } return d.handleArrayTableCollectionLast(key, elem) case reflect.Ptr: @@ -325,6 +356,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec return v, nil case reflect.Slice: + if d.clearArrayTable && v.Len() > 0 { + v.SetLen(0) + d.clearArrayTable = false + } elemType := v.Type().Elem() var elem reflect.Value if elemType.Kind() == reflect.Interface { @@ -576,7 +611,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { break } - err := d.seen.CheckExpression(expr) + _, err := d.seen.CheckExpression(expr) if err != nil { return reflect.Value{}, err } @@ -634,6 +669,14 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { v = initAndDereferencePointer(v) } + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return outi.UnmarshalTOML(value) + } + } + } + ok, err := d.tryTextUnmarshaler(value, v) if ok || err != nil { return err @@ -1031,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key
of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } @@ -1097,9 +1167,9 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node f := fieldByIndex(v, path) - if !f.CanSet() { - // If the field is not settable, need to take a slower path and make a copy of - // the struct itself to a new location. + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. nvp := reflect.New(v.Type()) nvp.Elem().Set(v) v = nvp.Elem() diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go new file mode 100644 index 00000000..00cfd6de --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -0,0 +1,7 @@ +package unstable + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a TOML document. +type Unmarshaler interface { + UnmarshalTOML(value *Node) error +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index a36146b8..d0424674 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -43,7 +43,8 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted // If *any* signature is found to be incorrect, it is skipped var acceptedKeys []AcceptedKey usedKeyids := make(map[string]string) - unverified_providers := ev.providers + unverified_providers := make([]Verifier, len(ev.providers)) + copy(unverified_providers, ev.providers) for _, s := range e.Signatures { sig, err := b64Decode(s.Sig) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go index f3e6c201..691091af 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -11,7 +11,10 @@ import ( "os" ) -const ECDSAKeyType = "ecdsa" +const ( + ECDSAKeyType = "ecdsa" + ECDSAKeyScheme = "ecdsa-sha2-nistp256" +) // ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and // verify signatures using ECDSA keys. @@ -89,6 +92,11 @@ func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { // LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in // a file in the custom securesystemslib format. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. 
func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go index 0a2210cf..d954e14b 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -88,6 +88,11 @@ func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { // LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored // in a file in the custom securesystemslib format. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go index b039659a..2abfcb27 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -94,6 +94,11 @@ func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { // LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a // file. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { @@ -103,9 +108,13 @@ func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { return LoadRSAPSSKeyFromBytes(contents) } -// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This byte array should represent a PEM encoded RSA key, as PEM encoding is required. -// The function returns an SSLibKey instance, which is a struct that holds the key data. - +// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This +// byte array should represent a PEM encoded RSA key, as PEM encoding is +// required. The function returns an SSLibKey instance, which is a struct that +// holds the key data. +// +// Deprecated: use LoadKey() for all key types, RSA is no longer the only key +// that uses PEM serialization. 
func LoadRSAPSSKeyFromBytes(contents []byte) (*SSLibKey, error) { pemData, keyObj, err := decodeAndParsePEM(contents) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go index 85cae65d..3a8259df 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -1,7 +1,13 @@ package signerverifier import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/hex" "errors" + "strings" ) var KeyIDHashAlgorithms = []string{"sha256", "sha512"} @@ -12,6 +18,7 @@ var ( ErrUnknownKeyType = errors.New("unknown key type") ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") ErrInvalidKey = errors.New("key object has no value") + ErrInvalidPEM = errors.New("unable to parse PEM block") ) const ( @@ -34,3 +41,106 @@ type KeyVal struct { Identity string `json:"identity,omitempty"` Issuer string `json:"issuer,omitempty"` } + +// LoadKey returns an SSLibKey object when provided a PEM encoded key. +// Currently, RSA, ED25519, and ECDSA keys are supported. +func LoadKey(keyBytes []byte) (*SSLibKey, error) { + pemBlock, rawKey, err := decodeAndParsePEM(keyBytes) + if err != nil { + return nil, err + } + + var key *SSLibKey + switch k := rawKey.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: RSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + }, + Scheme: RSAKeyScheme, + } + + case *rsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: RSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + Private: strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, pemBlock.Type))), + }, + Scheme: RSAKeyScheme, + } + + case ed25519.PublicKey: + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ED25519KeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(k)), + }, + Scheme: ED25519KeyType, + } + + case ed25519.PrivateKey: + pubKeyBytes := k.Public() + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ED25519KeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes.(ed25519.PublicKey))), + Private: strings.TrimSpace(hex.EncodeToString(k)), + }, + Scheme: ED25519KeyType, + } + + case *ecdsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ECDSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + }, + Scheme: ECDSAKeyScheme, + } + + case *ecdsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ECDSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + Private: 
strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, PrivateKeyPEM))), + }, + Scheme: ECDSAKeyScheme, + } + + default: + return nil, ErrUnknownKeyType + } + + keyID, err := calculateKeyID(key) + if err != nil { + return nil, err + } + key.KeyID = keyID + + return key, nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go index e77e07f4..e8a30b59 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go @@ -27,6 +27,9 @@ var ( // LoadKeyFromSSLibBytes returns a pointer to a Key instance created from the // contents of the bytes. The key contents are expected to be in the custom // securesystemslib format. +// +// Deprecated: use LoadKey() for all key types, RSA is no longer the only key +// that uses PEM serialization. func LoadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) { var key *SSLibKey if err := json.Unmarshal(contents, &key); err != nil { diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go index 543af56f..8ee624e9 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go @@ -15,6 +15,9 @@ package blob import ( + "crypto/sha256" + "crypto/sha512" + "encoding/hex" "fmt" "io" "net/http" @@ -72,3 +75,35 @@ func LoadFileOrURL(fileRef string) ([]byte, error) { } return raw, nil } + +func LoadFileOrURLWithChecksum(fileRef string, checksum string) ([]byte, error) { + checksumParts := strings.Split(checksum, ":") + if len(checksumParts) >= 3 { + return nil, fmt.Errorf("wrong checksum input format, must have at most 1 colon: %s", checksum) + } + + checksumAlgo := sha256.New() + checksumValue := checksumParts[len(checksumParts)-1] + if len(checksumParts) == 2 { + switch checksumParts[0] { + case "sha256": // the default set above + case "sha512": + checksumAlgo = sha512.New() + default: + return nil, fmt.Errorf("unsupported checksum algorithm: %s", checksumParts[0]) + } + } + + fileContent, err := LoadFileOrURL(fileRef) + if err != nil { + return nil, err + } + + checksumAlgo.Write(fileContent) + computedChecksum := hex.EncodeToString(checksumAlgo.Sum(nil)) + if computedChecksum != checksumValue { + return nil, fmt.Errorf("incorrect checksum for file %s: expected %s but got %s", fileRef, checksumValue, computedChecksum) + } + + return fileContent, nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go new file mode 100644 index 00000000..a26e2fb2 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go @@ -0,0 +1,65 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bundle + +import ( + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + protorekor "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/tle" +) + +const bundleV03MediaType = "application/vnd.dev.sigstore.bundle.v0.3+json" + +func MakeProtobufBundle(hint string, rawCert []byte, rekorEntry *models.LogEntryAnon, timestampBytes []byte) (*protobundle.Bundle, error) { + bundle := &protobundle.Bundle{MediaType: bundleV03MediaType} + + if hint != "" { + bundle.VerificationMaterial = &protobundle.VerificationMaterial{ + Content: &protobundle.VerificationMaterial_PublicKey{ + PublicKey: &protocommon.PublicKeyIdentifier{ + Hint: hint, + }, + }, + } + } else if len(rawCert) > 0 { + bundle.VerificationMaterial = &protobundle.VerificationMaterial{ + Content: &protobundle.VerificationMaterial_Certificate{ + Certificate: &protocommon.X509Certificate{ + RawBytes: rawCert, + }, + }, + } + } + + if len(timestampBytes) > 0 { + bundle.VerificationMaterial.TimestampVerificationData = &protobundle.TimestampVerificationData{ + Rfc3161Timestamps: []*protocommon.RFC3161SignedTimestamp{ + {SignedTimestamp: timestampBytes}, + }, + } + } + + if rekorEntry != nil { + tlogEntry, err := tle.GenerateTransparencyLogEntry(*rekorEntry) + if err != nil { + return nil, err + } + bundle.VerificationMaterial.TlogEntries = []*protorekor.TransparencyLogEntry{tlogEntry} + } + + return bundle, nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go index a2960e08..016687ac 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go @@ -58,6 +58,7 @@ const ( VariableSigstoreRootFile Variable = "SIGSTORE_ROOT_FILE" VariableSigstoreRekorPublicKey Variable = "SIGSTORE_REKOR_PUBLIC_KEY" VariableSigstoreIDToken Variable = "SIGSTORE_ID_TOKEN" //nolint:gosec + VariableSigstoreTSACertificateFile Variable = "SIGSTORE_TSA_CERTIFICATE_FILE" // Other external environment variables VariableGitHubHost Variable = "GITHUB_HOST" @@ -138,7 +139,12 @@ var ( Sensitive: false, External: true, }, - + VariableSigstoreTSACertificateFile: { + Description: "path to the concatenated PEM-encoded TSA certificate file (leaf, intermediate(s), root) used by Sigstore", + Expects: "path to the TSA certificate file", + Sensitive: false, + External: true, + }, VariableGitHubHost: { Description: "is URL of the GitHub Enterprise instance", Expects: "string with the URL of GitHub Enterprise instance", diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/fetch.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/fetch.go index ff81be22..709333ac 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/fetch.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/fetch.go @@ -74,8 +74,15 @@ func FetchSignaturesForReference(_ context.Context, ref name.Reference, opts ... 
if err != nil { return nil, err } + sigs, err := FetchSignatures(simg) + if err != nil { + return nil, fmt.Errorf("%s: %w", ref, err) + } + return sigs, nil +} - sigs, err := simg.Signatures() +func FetchSignatures(se oci.SignedEntity) ([]SignedPayload, error) { + sigs, err := se.Signatures() if err != nil { return nil, fmt.Errorf("remote image: %w", err) } @@ -84,7 +91,7 @@ func FetchSignaturesForReference(_ context.Context, ref name.Reference, opts ... return nil, fmt.Errorf("fetching signatures: %w", err) } if len(l) == 0 { - return nil, fmt.Errorf("no signatures associated with %s", ref) + return nil, errors.New("no signatures associated") } if len(l) > maxAllowedSigsOrAtts { return nil, fmt.Errorf("maximum number of signatures on an image is %d, found %d", maxAllowedSigsOrAtts, len(l)) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go index 6703635a..b1246913 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go @@ -24,7 +24,7 @@ import ( "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/cosign" "github.com/sigstore/cosign/v2/pkg/cosign/env" - "github.com/xanzy/go-gitlab" + gitlab "gitlab.com/gitlab-org/api/client-go" ) const ( diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go index 9adc2252..ed5bbc16 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go @@ -58,7 +58,6 @@ type Keys struct { public crypto.PublicKey } -// TODO(jason): Move this to an internal package. type KeysBytes struct { PrivateBytes []byte PublicBytes []byte @@ -69,12 +68,17 @@ func (k *KeysBytes) Password() []byte { return k.password } -// TODO(jason): Move this to an internal package. +// GeneratePrivateKey generates an ECDSA private key with the P-256 curve. func GeneratePrivateKey() (*ecdsa.PrivateKey, error) { return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) } -// TODO(jason): Move this to the only place it's used in cmd/cosign/cli/importkeypair, and unexport it. +// ImportKeyPair imports a key pair from a file containing a PEM-encoded +// private key encoded with a password provided by the 'pf' function. +// The private key can be in one of the following formats: +// - RSA private key (PKCS #1) +// - ECDSA private key +// - PKCS #8 private key (RSA, ECDSA or ED25519). func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { kb, err := os.ReadFile(filepath.Clean(keyPath)) if err != nil { @@ -180,7 +184,6 @@ func marshalKeyPair(ptype string, keypair Keys, pf PassFunc) (key *KeysBytes, er }, nil } -// TODO(jason): Move this to an internal package. func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { priv, err := GeneratePrivateKey() if err != nil { @@ -191,7 +194,7 @@ func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { return marshalKeyPair(SigstorePrivateKeyPemType, Keys{priv, priv.Public()}, pf) } -// TODO(jason): Move this to an internal package. +// PemToECDSAKey marshals and returns the PEM-encoded ECDSA public key. 
func PemToECDSAKey(pemBytes []byte) (*ecdsa.PublicKey, error) { pub, err := cryptoutils.UnmarshalPEMToPublicKey(pemBytes) if err != nil { @@ -204,7 +207,8 @@ func PemToECDSAKey(pemBytes []byte) (*ecdsa.PublicKey, error) { return ecdsaPub, nil } -// TODO(jason): Move this to pkg/signature, the only place it's used, and unimport it. +// LoadPrivateKey loads a cosign PEM private key encrypted with the given passphrase, +// and returns a SignerVerifier instance. The private key must be in the PKCS #8 format. func LoadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { // Decrypt first p, _ := pem.Decode(key) @@ -219,7 +223,6 @@ func LoadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { if err != nil { return nil, fmt.Errorf("decrypt: %w", err) } - pk, err := x509.ParsePKCS8PrivateKey(x509Encoded) if err != nil { return nil, fmt.Errorf("parsing private key: %w", err) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/client.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/client.go index c89a4e0b..5c0f59b9 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/client.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/client.go @@ -17,9 +17,9 @@ package kubernetes import ( "fmt" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes" - utilversion "k8s.io/apimachinery/pkg/util/version" // Initialize all known client auth plugins _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/secret.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/secret.go index b05c235e..11289888 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/secret.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/kubernetes/secret.go @@ -21,12 +21,11 @@ import ( "os" "strings" + "github.com/sigstore/cosign/v2/pkg/cosign" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - - "github.com/sigstore/cosign/v2/pkg/cosign" ) const ( diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go index 83d6f61f..a9379ba9 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tlog.go @@ -32,9 +32,6 @@ import ( "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" - "github.com/transparency-dev/merkle/proof" - "github.com/transparency-dev/merkle/rfc6962" - "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/cosign/bundle" "github.com/sigstore/cosign/v2/pkg/cosign/env" @@ -49,6 +46,8 @@ import ( intoto_v001 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.1" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/tuf" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" ) // This is the rekor transparency log public key target name diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go new file mode 100644 index 00000000..c2032f39 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cosign + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "os" + + "github.com/sigstore/cosign/v2/pkg/cosign/env" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/tuf" +) + +const ( + tsaLeafCertStr = `tsa_leaf.crt.pem` + tsaRootCertStr = `tsa_root.crt.pem` + tsaIntermediateCertStrPattern = `tsa_intermediate_%d.crt.pem` +) + +type TSACertificates struct { + LeafCert *x509.Certificate + IntermediateCerts []*x509.Certificate + RootCert []*x509.Certificate +} + +type GetTargetStub func(ctx context.Context, usage tuf.UsageKind, names []string) ([]byte, error) + +func GetTufTargets(ctx context.Context, usage tuf.UsageKind, names []string) ([]byte, error) { + tufClient, err := tuf.NewFromEnv(ctx) + if err != nil { + return nil, fmt.Errorf("error creating TUF client: %w", err) + } + targets, err := tufClient.GetTargetsByMeta(usage, names) + if err != nil { + return nil, fmt.Errorf("error fetching targets by metadata with usage %v: %w", usage, err) + } + + var buffer bytes.Buffer + for _, target := range targets { + buffer.Write(target.Target) + buffer.WriteByte('\n') + } + return buffer.Bytes(), nil +} + +func isTufTargetExist(ctx context.Context, name string) (bool, error) { + tufClient, err := tuf.NewFromEnv(ctx) + if err != nil { + return false, fmt.Errorf("error creating TUF client: %w", err) + } + _, err = tufClient.GetTarget(name) + if err != nil { + return false, nil + } + return true, nil +} + +// GetTSACerts retrieves trusted TSA certificates from the embedded or cached +// TUF root. If expired, makes a network call to retrieve the updated targets. +// By default, the certificates come from TUF, but you can override this for test +// purposes by using an env variable `SIGSTORE_TSA_CERTIFICATE_FILE` or a file path +// specified in `certChainPath`. If using an alternate, the file should be in PEM format. 
+func GetTSACerts(ctx context.Context, certChainPath string, fn GetTargetStub) (*TSACertificates, error) { + altTSACert := env.Getenv(env.VariableSigstoreTSACertificateFile) + + var raw []byte + var err error + var exists bool + switch { + case altTSACert != "": + raw, err = os.ReadFile(altTSACert) + case certChainPath != "": + raw, err = os.ReadFile(certChainPath) + default: + certNames := []string{tsaLeafCertStr, tsaRootCertStr} + for i := 0; ; i++ { + intermediateCertStr := fmt.Sprintf(tsaIntermediateCertStrPattern, i) + exists, err = isTufTargetExist(ctx, intermediateCertStr) + if err != nil { + return nil, fmt.Errorf("error fetching TSA certificates: %w", err) + } + if !exists { + break + } + certNames = append(certNames, intermediateCertStr) + } + raw, err = fn(ctx, tuf.TSA, certNames) + if err != nil { + return nil, fmt.Errorf("error fetching TSA certificates: %w", err) + } + } + + if err != nil { + return nil, fmt.Errorf("error reading TSA certificate file: %w", err) + } + + leaves, intermediates, roots, err := splitPEMCertificateChain(raw) + if err != nil { + return nil, fmt.Errorf("error splitting TSA certificates: %w", err) + } + + if len(leaves) != 1 { + return nil, fmt.Errorf("TSA certificate chain must contain exactly one leaf certificate") + } + + if len(roots) == 0 { + return nil, fmt.Errorf("TSA certificate chain must contain at least one root certificate") + } + + return &TSACertificates{ + LeafCert: leaves[0], + IntermediateCerts: intermediates, + RootCert: roots, + }, nil +} + +// splitPEMCertificateChain returns a list of leaf (non-CA) certificates, a certificate pool for +// intermediate CA certificates, and a certificate pool for root CA certificates +func splitPEMCertificateChain(pem []byte) (leaves, intermediates, roots []*x509.Certificate, err error) { + certs, err := cryptoutils.UnmarshalCertificatesFromPEM(pem) + if err != nil { + return nil, nil, nil, err + } + + for _, cert := range certs { + if !cert.IsCA { + leaves = append(leaves, cert) + } else { + // root certificates are self-signed + if bytes.Equal(cert.RawSubject, cert.RawIssuer) { + roots = append(roots, cert) + } else { + intermediates = append(intermediates, cert) + } + } + } + + return leaves, intermediates, roots, nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go index abd551d8..e565052d 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go @@ -26,6 +26,7 @@ import ( "encoding/hex" "encoding/json" "encoding/pem" + "errors" "fmt" "io/fs" "net/http" @@ -34,29 +35,24 @@ import ( "strings" "time" - "github.com/pkg/errors" - + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" "github.com/digitorus/timestamp" "github.com/go-openapi/runtime" - "github.com/nozzle/throttler" - - "github.com/sigstore/cosign/v2/internal/pkg/cosign" - "github.com/sigstore/cosign/v2/pkg/blob" - cbundle "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci/static" - "github.com/sigstore/cosign/v2/pkg/types" - - "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/remote/transport" - + "github.com/nozzle/throttler" ssldsse "github.com/secure-systems-lab/go-securesystemslib/dsse" + 
"github.com/sigstore/cosign/v2/internal/pkg/cosign" ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote" "github.com/sigstore/cosign/v2/internal/ui" + "github.com/sigstore/cosign/v2/pkg/blob" + cbundle "github.com/sigstore/cosign/v2/pkg/cosign/bundle" "github.com/sigstore/cosign/v2/pkg/oci" "github.com/sigstore/cosign/v2/pkg/oci/layout" ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote" + "github.com/sigstore/cosign/v2/pkg/oci/static" + "github.com/sigstore/cosign/v2/pkg/types" "github.com/sigstore/rekor/pkg/generated/client" "github.com/sigstore/rekor/pkg/generated/models" rekor_types "github.com/sigstore/rekor/pkg/types" @@ -65,6 +61,9 @@ import ( intoto_v001 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.1" intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2" rekord_v001 "github.com/sigstore/rekor/pkg/types/rekord/v0.0.1" + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/dsse" @@ -152,6 +151,8 @@ type CheckOpts struct { TSARootCertificates []*x509.Certificate // TSAIntermediateCertificates are the set of intermediates for chain building TSAIntermediateCertificates []*x509.Certificate + // UseSignedTimestamps enables timestamp verification using a TSA + UseSignedTimestamps bool // IgnoreTlog skip tlog verification IgnoreTlog bool @@ -163,6 +164,89 @@ type CheckOpts struct { // Should the experimental OCI 1.1 behaviour be enabled or not. // Defaults to false. ExperimentalOCI11 bool + + // NewBundleFormat enables the new bundle format (Cosign Bundle Spec) and the new verifier. + NewBundleFormat bool + + // TrustedMaterial is the trusted material to use for verification. + // Currently, this is only applicable when NewBundleFormat is true. + TrustedMaterial root.TrustedMaterial +} + +type verifyTrustedMaterial struct { + root.TrustedMaterial + keyTrustedMaterial root.TrustedMaterial +} + +func (v *verifyTrustedMaterial) PublicKeyVerifier(hint string) (root.TimeConstrainedVerifier, error) { + return v.keyTrustedMaterial.PublicKeyVerifier(hint) +} + +// verificationOptions returns the verification options for verifying with sigstore-go. 
+func (co *CheckOpts) verificationOptions() (trustedMaterial root.TrustedMaterial, verifierOptions []verify.VerifierOption, policyOptions []verify.PolicyOption, err error) { + policyOptions = make([]verify.PolicyOption, 0) + + if len(co.Identities) > 0 { + var sanMatcher verify.SubjectAlternativeNameMatcher + var issuerMatcher verify.IssuerMatcher + if len(co.Identities) > 1 { + return nil, nil, nil, fmt.Errorf("unsupported: multiple identities are not supported at this time") + } + sanMatcher, err = verify.NewSANMatcher(co.Identities[0].Subject, co.Identities[0].SubjectRegExp) + if err != nil { + return nil, nil, nil, err + } + + issuerMatcher, err = verify.NewIssuerMatcher(co.Identities[0].Issuer, co.Identities[0].IssuerRegExp) + if err != nil { + return nil, nil, nil, err + } + + extensions := certificate.Extensions{ + GithubWorkflowTrigger: co.CertGithubWorkflowTrigger, + GithubWorkflowSHA: co.CertGithubWorkflowSha, + GithubWorkflowName: co.CertGithubWorkflowName, + GithubWorkflowRepository: co.CertGithubWorkflowRepository, + GithubWorkflowRef: co.CertGithubWorkflowRef, + } + + certificateIdentities, err := verify.NewCertificateIdentity(sanMatcher, issuerMatcher, extensions) + if err != nil { + return nil, nil, nil, err + } + policyOptions = []verify.PolicyOption{verify.WithCertificateIdentity(certificateIdentities)} + } + + // Wrap TrustedMaterial + vTrustedMaterial := &verifyTrustedMaterial{TrustedMaterial: co.TrustedMaterial} + + verifierOptions = make([]verify.VerifierOption, 0) + + if co.SigVerifier != nil { + // We are verifying with a public key + policyOptions = append(policyOptions, verify.WithKey()) + newExpiringKey := root.NewExpiringKey(co.SigVerifier, time.Time{}, time.Time{}) + vTrustedMaterial.keyTrustedMaterial = root.NewTrustedPublicKeyMaterial(func(_ string) (root.TimeConstrainedVerifier, error) { + return newExpiringKey, nil + }) + } else { //nolint:gocritic + // We are verifying with a certificate + if !co.IgnoreSCT { + verifierOptions = append(verifierOptions, verify.WithSignedCertificateTimestamps(1)) + } + } + + if !co.IgnoreTlog { + verifierOptions = append(verifierOptions, verify.WithTransparencyLog(1), verify.WithIntegratedTimestamps(1)) + } + if co.UseSignedTimestamps { + verifierOptions = append(verifierOptions, verify.WithSignedTimestamps(1)) + } + if co.IgnoreTlog && !co.UseSignedTimestamps { + verifierOptions = append(verifierOptions, verify.WithCurrentTime()) + } + + return vTrustedMaterial, verifierOptions, policyOptions, nil } // This is a substitutable signature verification function that can be used for verifying @@ -660,18 +744,21 @@ func verifySignatures(ctx context.Context, sigs oci.Signatures, h v1.Hash, co *C // a. Verifies the Rekor entry in the bundle, if provided. This works offline OR // b. If we don't have a Rekor entry retrieved via cert, do an online lookup (assuming // we are in experimental mode). -// 3. If a certificate is provided, check it's expiration using the transparency log timestamp. +// 3. If a certificate is provided, check its expiration using the transparency log timestamp. func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash, verifyFn signatureVerificationFn, co *CheckOpts) ( bundleVerified bool, err error) { var acceptableRFC3161Time, acceptableRekorBundleTime *time.Time // Timestamps for the signature we accept, or nil if not applicable. 
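The flag handling at the end of verificationOptions is easy to misread, so here is a self-contained sketch of the same decision table. Option names are modeled as plain strings purely for illustration; the real code appends verify.VerifierOption values from sigstore-go instead:

```go
package main

import "fmt"

// verifierOptions mirrors the flag logic above: SCT checks apply only on the
// certificate path, the tlog contributes both an entry and an integrated
// timestamp, a TSA adds a signed timestamp, and if no trusted time source
// remains the verifier falls back to the current time.
func verifierOptions(haveKey, ignoreSCT, ignoreTlog, useSignedTimestamps bool) []string {
	var opts []string
	if !haveKey && !ignoreSCT {
		opts = append(opts, "WithSignedCertificateTimestamps(1)")
	}
	if !ignoreTlog {
		opts = append(opts, "WithTransparencyLog(1)", "WithIntegratedTimestamps(1)")
	}
	if useSignedTimestamps {
		opts = append(opts, "WithSignedTimestamps(1)")
	}
	if ignoreTlog && !useSignedTimestamps {
		opts = append(opts, "WithCurrentTime()")
	}
	return opts
}

func main() {
	fmt.Println(verifierOptions(true, false, true, true))
	// [WithSignedTimestamps(1)] — key path, tlog skipped, TSA timestamp required
}
```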
- acceptableRFC3161Timestamp, err := VerifyRFC3161Timestamp(sig, co) - if err != nil { - return false, fmt.Errorf("unable to verify RFC3161 timestamp bundle: %w", err) - } - if acceptableRFC3161Timestamp != nil { - acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time + var acceptableRFC3161Timestamp *timestamp.Timestamp + if co.UseSignedTimestamps { + acceptableRFC3161Timestamp, err = VerifyRFC3161Timestamp(sig, co) + if err != nil { + return false, fmt.Errorf("unable to verify RFC3161 timestamp bundle: %w", err) + } + if acceptableRFC3161Timestamp != nil { + acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time + } } if !co.IgnoreTlog { @@ -710,6 +797,7 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash, } t := time.Unix(*e.IntegratedTime, 0) acceptableRekorBundleTime = &t + bundleVerified = true } } diff --git a/vendor/github.com/xanzy/go-gitlab/pages.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_bundle.go similarity index 51% rename from vendor/github.com/xanzy/go-gitlab/pages.go rename to vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_bundle.go index 617b0ba4..85a9a660 100644 --- a/vendor/github.com/xanzy/go-gitlab/pages.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_bundle.go @@ -1,5 +1,5 @@ // -// Copyright 2021, Sander van Harmelen +// Copyright 2025 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,34 +12,24 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package gitlab +package cosign import ( - "fmt" - "net/http" -) + "context" -type PagesService struct { - client *Client -} + "github.com/sigstore/sigstore-go/pkg/verify" +) -// UnpublishPages unpublished pages. The user must have admin privileges. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pages.html#unpublish-pages -func (s *PagesService) UnpublishPages(gid interface{}, options ...RequestOptionFunc) (*Response, error) { - page, err := parseID(gid) +// VerifyNewBundle verifies a SigstoreBundle with the given parameters +func VerifyNewBundle(_ context.Context, co *CheckOpts, artifactPolicyOption verify.ArtifactPolicyOption, bundle verify.SignedEntity) (*verify.VerificationResult, error) { + trustedMaterial, verifierOptions, policyOptions, err := co.verificationOptions() if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/pages", PathEscape(page)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + verifier, err := verify.NewSignedEntityVerifier(trustedMaterial, verifierOptions...) 
if err != nil { return nil, err } - - return s.client.Do(req, nil) + return verifier.Verify(bundle, verify.NewPolicy(artifactPolicyOption, policyOptions...)) } diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go index 934b2d97..1b904c2c 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go @@ -108,7 +108,7 @@ func VerifySCT(_ context.Context, certPEM, chainPEM, rawSCT []byte, pubKeys *Tru } err = ctutil.VerifySCT(pubKeyMetadata.PubKey, []*ctx509.Certificate{cert, certChain[0]}, sct, true) if err != nil { - return fmt.Errorf("error verifying embedded SCT") + return fmt.Errorf("error verifying embedded SCT: %w", err) } if pubKeyMetadata.Status != tuf.Active { fmt.Fprintf(os.Stderr, "**Info** Successfully verified embedded SCT using an expired verification key\n") diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go index 58798339..8c6eda5f 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go @@ -21,6 +21,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/sigstore/cosign/v2/pkg/oci" ) @@ -48,6 +49,12 @@ type image struct { opt *options } +// The wrapped Image implements ConfigLayer, but the wrapping hides that from typechecks in pkg/v1/remote. +// Make image explicitly implement ConfigLayer so that this returns a mountable config layer for pkg/v1/remote. +func (i *image) ConfigLayer() (v1.Layer, error) { + return partial.ConfigLayer(i.Image) +} + var _ oci.SignedImage = (*image)(nil) // Signatures implements oci.SignedImage diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go index 0a7f2384..6eeaadd0 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go @@ -113,6 +113,14 @@ func WithRemoteOptions(opts ...remote.Option) Option { } } +// WithMoreRemoteOptions is a functional option for adding to the default +// remote options already specified +func WithMoreRemoteOptions(opts ...remote.Option) Option { + return func(o *options) { + o.ROpt = append(o.ROpt, opts...) + } +} + // WithTargetRepository is a functional option for overriding the default // target repository hosting the signature and attestation tags. 
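The new WithMoreRemoteOptions differs from the pre-existing WithRemoteOptions only in append-versus-replace semantics. A self-contained sketch with a toy options struct (in the real code, ROpt holds remote.Option values from go-containerregistry):

```go
package main

import "fmt"

type options struct{ ROpt []string }

type Option func(*options)

// WithRemoteOptions replaces whatever remote options were already set.
func WithRemoteOptions(opts ...string) Option {
	return func(o *options) { o.ROpt = opts }
}

// WithMoreRemoteOptions appends to the defaults instead of discarding them.
func WithMoreRemoteOptions(opts ...string) Option {
	return func(o *options) { o.ROpt = append(o.ROpt, opts...) }
}

func main() {
	o := &options{ROpt: []string{"default-auth", "default-transport"}}
	WithRemoteOptions("custom-auth")(o)
	fmt.Println(o.ROpt) // [custom-auth] — defaults are gone

	o = &options{ROpt: []string{"default-auth", "default-transport"}}
	WithMoreRemoteOptions("custom-auth")(o)
	fmt.Println(o.ROpt) // [default-auth default-transport custom-auth]
}
```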
func WithTargetRepository(repo name.Repository) Option { diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go index f8a53e0f..825d8072 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go @@ -21,6 +21,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/sigstore/cosign/v2/pkg/oci" "github.com/sigstore/cosign/v2/pkg/oci/empty" @@ -52,6 +53,12 @@ type sigs struct { v1.Image } +// The wrapped Image implements ConfigLayer, but the wrapping hides that from typechecks in pkg/v1/remote. +// Make sigs explicitly implement ConfigLayer so that this returns a mountable config layer for pkg/v1/remote. +func (s *sigs) ConfigLayer() (v1.Layer, error) { + return partial.ConfigLayer(s.Image) +} + var _ oci.Signatures = (*sigs)(nil) // Get implements oci.Signatures diff --git a/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt new file mode 100644 index 00000000..8b16ae01 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2022 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/protobuf-specs/LICENSE b/vendor/github.com/sigstore/protobuf-specs/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go new file mode 100644 index 00000000..80198530 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go @@ -0,0 +1,543 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.3 +// protoc v5.29.3 +// source: sigstore_bundle.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + v11 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Various timestamped counter signatures over the artifact's signature. +// Currently only RFC3161 signatures are provided. More formats may be added +// in the future. +type TimestampVerificationData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // A list of RFC3161 signed timestamps provided by the user. + // This can be used when the entry has not been stored on a + // transparency log, or in conjunction for a stronger trust model. + // Clients MUST verify the hashed message in the message imprint + // against the signature in the bundle. + Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimestampVerificationData) Reset() { + *x = TimestampVerificationData{} + mi := &file_sigstore_bundle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimestampVerificationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampVerificationData) ProtoMessage() {} + +func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimestampVerificationData.ProtoReflect.Descriptor instead. +func (*TimestampVerificationData) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{0} +} + +func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTimestamp { + if x != nil { + return x.Rfc3161Timestamps + } + return nil +} + +// VerificationMaterial captures details on the materials used to verify +// signatures. This message may be embedded in a DSSE envelope as a signature +// extension. Specifically, the `ext` field of the extension will expect this +// message when the signature extension is for Sigstore. This is identified by +// the `kind` field in the extension, which must be set to +// application/vnd.dev.sigstore.verificationmaterial;version=0.1 for Sigstore. +// When used as a DSSE extension, if the `public_key` field is used to indicate +// the key identifier, it MUST match the `keyid` field of the signature the +// extension is attached to. +type VerificationMaterial struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The key material for verification purposes. + // + // This allows key material to be conveyed in one of three forms: + // + // 1.
An unspecified public key identifier, for retrieving a key + // from an out-of-band mechanism (such as a keyring); + // + // 2. A sequence of one or more X.509 certificates, of which the first member + // MUST be a leaf certificate conveying the signing key. Subsequent members + // SHOULD be in issuing order, meaning that `n + 1` should be an issuer for `n`. + // + // Signers MUST NOT include root CA certificates in bundles, and SHOULD NOT + // include intermediate CA certificates that appear in an independent root of trust + // (such as the Public Good Instance's trusted root). + // + // Verifiers MUST validate the chain carefully to ensure that it chains up + // to a CA certificate that they independently trust. Verifiers SHOULD + // handle old or non-complying bundles that have superfluous intermediate and/or + // root CA certificates by either ignoring them or explicitly considering them + // untrusted for the purposes of chain building. + // + // 3. A single X.509 certificate, which MUST be a leaf certificate conveying + // the signing key. + // + // When used with the Public Good Instance (PGI) of Sigstore for "keyless" signing + // via Fulcio, form (1) MUST NOT be used, regardless of bundle version. Form (1) + // MAY be used with the PGI for self-managed keys. + // + // When used in a `0.1` or `0.2` bundle with the PGI and "keyless" signing, + // form (2) MUST be used. + // + // When used in a `0.3` bundle with the PGI and "keyless" signing, + // form (3) MUST be used. + // + // Types that are valid to be assigned to Content: + // + // *VerificationMaterial_PublicKey + // *VerificationMaterial_X509CertificateChain + // *VerificationMaterial_Certificate + Content isVerificationMaterial_Content `protobuf_oneof:"content"` + // An inclusion proof and an optional signed timestamp from the log. + // Client verification libraries MAY provide an option to support v0.1 + // bundles for backwards compatibility, which may contain an inclusion + // promise and not an inclusion proof. In this case, the client MUST + // validate the promise. + // Verifiers SHOULD NOT allow v0.1 bundles if they're used in an + // ecosystem which never produced them. + TlogEntries []*v11.TransparencyLogEntry `protobuf:"bytes,3,rep,name=tlog_entries,json=tlogEntries,proto3" json:"tlog_entries,omitempty"` + // Timestamp may also come from + // tlog_entries.inclusion_promise.signed_entry_timestamp. + TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerificationMaterial) Reset() { + *x = VerificationMaterial{} + mi := &file_sigstore_bundle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerificationMaterial) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerificationMaterial) ProtoMessage() {} + +func (x *VerificationMaterial) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerificationMaterial.ProtoReflect.Descriptor instead. 
+func (*VerificationMaterial) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{1} +} + +func (x *VerificationMaterial) GetContent() isVerificationMaterial_Content { + if x != nil { + return x.Content + } + return nil +} + +func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_PublicKey); ok { + return x.PublicKey + } + } + return nil +} + +func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_X509CertificateChain); ok { + return x.X509CertificateChain + } + } + return nil +} + +func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_Certificate); ok { + return x.Certificate + } + } + return nil +} + +func (x *VerificationMaterial) GetTlogEntries() []*v11.TransparencyLogEntry { + if x != nil { + return x.TlogEntries + } + return nil +} + +func (x *VerificationMaterial) GetTimestampVerificationData() *TimestampVerificationData { + if x != nil { + return x.TimestampVerificationData + } + return nil +} + +type isVerificationMaterial_Content interface { + isVerificationMaterial_Content() +} + +type VerificationMaterial_PublicKey struct { + PublicKey *v1.PublicKeyIdentifier `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"` +} + +type VerificationMaterial_X509CertificateChain struct { + X509CertificateChain *v1.X509CertificateChain `protobuf:"bytes,2,opt,name=x509_certificate_chain,json=x509CertificateChain,proto3,oneof"` +} + +type VerificationMaterial_Certificate struct { + Certificate *v1.X509Certificate `protobuf:"bytes,5,opt,name=certificate,proto3,oneof"` +} + +func (*VerificationMaterial_PublicKey) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {} + +type Bundle struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.bundle.v0.3+json + // when encoded as JSON. + // Clients must be able to accept media types using the previously + // defined formats: + // * application/vnd.dev.sigstore.bundle+json;version=0.1 + // * application/vnd.dev.sigstore.bundle+json;version=0.2 + // * application/vnd.dev.sigstore.bundle+json;version=0.3 + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // When a signer is identified by an X.509 certificate, a verifier MUST + // verify that the signature was computed at the time the certificate + // was valid as described in the Sigstore client spec: "Verification + // using a Bundle". + // + // If the verification material contains a public key identifier + // (key hint) and the `content` is a DSSE envelope, the key hints + // MUST be exactly the same in the verification material and in the + // DSSE envelope.
+ VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` + // Types that are valid to be assigned to Content: + // + // *Bundle_MessageSignature + // *Bundle_DsseEnvelope + Content isBundle_Content `protobuf_oneof:"content"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Bundle) Reset() { + *x = Bundle{} + mi := &file_sigstore_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. +func (*Bundle) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *Bundle) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { + if x != nil { + return x.VerificationMaterial + } + return nil +} + +func (x *Bundle) GetContent() isBundle_Content { + if x != nil { + return x.Content + } + return nil +} + +func (x *Bundle) GetMessageSignature() *v1.MessageSignature { + if x != nil { + if x, ok := x.Content.(*Bundle_MessageSignature); ok { + return x.MessageSignature + } + } + return nil +} + +func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { + if x != nil { + if x, ok := x.Content.(*Bundle_DsseEnvelope); ok { + return x.DsseEnvelope + } + } + return nil +} + +type isBundle_Content interface { + isBundle_Content() +} + +type Bundle_MessageSignature struct { + MessageSignature *v1.MessageSignature `protobuf:"bytes,3,opt,name=message_signature,json=messageSignature,proto3,oneof"` +} + +type Bundle_DsseEnvelope struct { + // A DSSE envelope can contain arbitrary payloads. + // Verifiers must verify that the payload type is a + // supported and expected type. This is part of the DSSE + // protocol which is defined here: + // + // DSSE envelopes in a bundle MUST have exactly one signature. + // This is a limitation from the DSSE spec, as it can contain + // multiple signatures. There are two primary reasons: + // 1. It simplifies the verification logic and policy + // 2. The bundle (currently) can only contain a single + // instance of the required verification materials + // + // During verification a client MUST reject an envelope if + // the number of signatures is not equal to one. 
+ DsseEnvelope *dsse.Envelope `protobuf:"bytes,4,opt,name=dsse_envelope,json=dsseEnvelope,proto3,oneof"` +} + +func (*Bundle_MessageSignature) isBundle_Content() {} + +func (*Bundle_DsseEnvelope) isBundle_Content() {} + +var File_sigstore_bundle_proto protoreflect.FileDescriptor + +var file_sigstore_bundle_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a, + 0x19, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, 0x12, 0x72, 0x66, + 0x63, 0x33, 0x31, 0x36, 0x31, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x72, 0x66, 0x63, 0x33, 0x31, 0x36, 0x31, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x14, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x69, 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x14, 0x78, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x12, 0x50, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x74, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0xbf, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x5c, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x73, 0x73, 0x65, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x33, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, + 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, + 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_bundle_proto_rawDescOnce sync.Once + file_sigstore_bundle_proto_rawDescData = file_sigstore_bundle_proto_rawDesc +) + +func file_sigstore_bundle_proto_rawDescGZIP() []byte { + file_sigstore_bundle_proto_rawDescOnce.Do(func() { + file_sigstore_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_bundle_proto_rawDescData) + }) + return file_sigstore_bundle_proto_rawDescData +} + +var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sigstore_bundle_proto_goTypes = []any{ + (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData + (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial + (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle + (*v1.RFC3161SignedTimestamp)(nil), // 3: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*v1.PublicKeyIdentifier)(nil), // 4: dev.sigstore.common.v1.PublicKeyIdentifier + (*v1.X509CertificateChain)(nil), // 5: dev.sigstore.common.v1.X509CertificateChain + (*v1.X509Certificate)(nil), // 6: dev.sigstore.common.v1.X509Certificate + (*v11.TransparencyLogEntry)(nil), // 7: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.MessageSignature)(nil), // 8: dev.sigstore.common.v1.MessageSignature + (*dsse.Envelope)(nil), // 9: io.intoto.Envelope +} +var file_sigstore_bundle_proto_depIdxs = []int32{ + 3, // 0: dev.sigstore.bundle.v1.TimestampVerificationData.rfc3161_timestamps:type_name -> dev.sigstore.common.v1.RFC3161SignedTimestamp + 4, // 1: dev.sigstore.bundle.v1.VerificationMaterial.public_key:type_name -> dev.sigstore.common.v1.PublicKeyIdentifier + 5, // 2: dev.sigstore.bundle.v1.VerificationMaterial.x509_certificate_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 6, // 3: dev.sigstore.bundle.v1.VerificationMaterial.certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 7, // 4: dev.sigstore.bundle.v1.VerificationMaterial.tlog_entries:type_name -> dev.sigstore.rekor.v1.TransparencyLogEntry + 0, // 5: dev.sigstore.bundle.v1.VerificationMaterial.timestamp_verification_data:type_name -> dev.sigstore.bundle.v1.TimestampVerificationData + 1, // 6: dev.sigstore.bundle.v1.Bundle.verification_material:type_name -> dev.sigstore.bundle.v1.VerificationMaterial + 8, // 7: dev.sigstore.bundle.v1.Bundle.message_signature:type_name -> dev.sigstore.common.v1.MessageSignature + 9, // 8: dev.sigstore.bundle.v1.Bundle.dsse_envelope:type_name -> io.intoto.Envelope + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_bundle_proto_init() } +func file_sigstore_bundle_proto_init() 
{ + if File_sigstore_bundle_proto != nil { + return + } + file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{ + (*VerificationMaterial_PublicKey)(nil), + (*VerificationMaterial_X509CertificateChain)(nil), + (*VerificationMaterial_Certificate)(nil), + } + file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{ + (*Bundle_MessageSignature)(nil), + (*Bundle_DsseEnvelope)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_bundle_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_bundle_proto_goTypes, + DependencyIndexes: file_sigstore_bundle_proto_depIdxs, + MessageInfos: file_sigstore_bundle_proto_msgTypes, + }.Build() + File_sigstore_bundle_proto = out.File + file_sigstore_bundle_proto_rawDesc = nil + file_sigstore_bundle_proto_goTypes = nil + file_sigstore_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go new file mode 100644 index 00000000..2c5c99ef --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -0,0 +1,1258 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.3 +// protoc v5.29.3 +// source: sigstore_common.proto + +package v1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Only a subset of the secure hash standard algorithms are supported. +// See for more +// details. +// UNSPECIFIED SHOULD not be used, primary reason for inclusion is to force +// any proto JSON serialization to emit the used hash algorithm, as default +// option is to *omit* the default value of an enum (which is the first +// value, represented by '0'). +type HashAlgorithm int32 + +const ( + HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED HashAlgorithm = 0 + HashAlgorithm_SHA2_256 HashAlgorithm = 1 + HashAlgorithm_SHA2_384 HashAlgorithm = 2 + HashAlgorithm_SHA2_512 HashAlgorithm = 3 + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + HashAlgorithm_SHA3_384 HashAlgorithm = 5 +) + +// Enum value maps for HashAlgorithm.
+var ( + HashAlgorithm_name = map[int32]string{ + 0: "HASH_ALGORITHM_UNSPECIFIED", + 1: "SHA2_256", + 2: "SHA2_384", + 3: "SHA2_512", + 4: "SHA3_256", + 5: "SHA3_384", + } + HashAlgorithm_value = map[string]int32{ + "HASH_ALGORITHM_UNSPECIFIED": 0, + "SHA2_256": 1, + "SHA2_384": 2, + "SHA2_512": 3, + "SHA3_256": 4, + "SHA3_384": 5, + } +) + +func (x HashAlgorithm) Enum() *HashAlgorithm { + p := new(HashAlgorithm) + *p = x + return p +} + +func (x HashAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HashAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[0].Descriptor() +} + +func (HashAlgorithm) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[0] +} + +func (x HashAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HashAlgorithm.Descriptor instead. +func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +// Details of a specific public key, capturing the key encoding method, +// and signature algorithm. +// +// PublicKeyDetails captures the public key/hash algorithm combinations +// recommended in the Sigstore ecosystem. +// +// This is modelled as a linear set as we want to provide a small number of +// opinionated options instead of allowing every possible permutation. +// +// Any changes to this enum MUST be reflected in the algorithm registry. +// See: docs/algorithm-registry.md +// +// To avoid the possibility of contradicting formats such as PKCS1 with +// ED25519 the valid permutations are listed as a linear set instead of a +// cartesian set (i.e. one combined variable instead of two, one for encoding +// and one for the signature algorithm). +type PublicKeyDetails int32 + +const ( + PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 + // RSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 + // RSA public key in PKIX format, PKCS#1v1.5 signature + PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 + PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 PublicKeyDetails = 10 + PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 PublicKeyDetails = 11 + // RSA public key in PKIX format, RSASSA-PSS signature + PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256 PublicKeyDetails = 16 // See RFC4055 + PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256 PublicKeyDetails = 17 + PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 + // ECDSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto.
+ PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 + PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 + PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 + PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 PublicKeyDetails = 13 + // Ed 25519 + PublicKeyDetails_PKIX_ED25519 PublicKeyDetails = 7 // See RFC8032 + PublicKeyDetails_PKIX_ED25519_PH PublicKeyDetails = 8 + // LMS and LM-OTS + // + // These keys and signatures may be used by private Sigstore + // deployments, but are not currently supported by the public + // good instance. + // + // USER WARNING: LMS and LM-OTS are both stateful signature schemes. + // Using them correctly requires discretion and careful consideration + // to ensure that individual secret keys are not used more than once. + // In addition, LM-OTS is a single-use scheme, meaning that it + // MUST NOT be used for more than one signature per LM-OTS key. + // If you cannot maintain these invariants, you MUST NOT use these + // schemes. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 +) + +// Enum value maps for PublicKeyDetails. +var ( + PublicKeyDetails_name = map[int32]string{ + 0: "PUBLIC_KEY_DETAILS_UNSPECIFIED", + 1: "PKCS1_RSA_PKCS1V5", + 2: "PKCS1_RSA_PSS", + 3: "PKIX_RSA_PKCS1V5", + 4: "PKIX_RSA_PSS", + 9: "PKIX_RSA_PKCS1V15_2048_SHA256", + 10: "PKIX_RSA_PKCS1V15_3072_SHA256", + 11: "PKIX_RSA_PKCS1V15_4096_SHA256", + 16: "PKIX_RSA_PSS_2048_SHA256", + 17: "PKIX_RSA_PSS_3072_SHA256", + 18: "PKIX_RSA_PSS_4096_SHA256", + 6: "PKIX_ECDSA_P256_HMAC_SHA_256", + 5: "PKIX_ECDSA_P256_SHA_256", + 12: "PKIX_ECDSA_P384_SHA_384", + 13: "PKIX_ECDSA_P521_SHA_512", + 7: "PKIX_ED25519", + 8: "PKIX_ED25519_PH", + 14: "LMS_SHA256", + 15: "LMOTS_SHA256", + } + PublicKeyDetails_value = map[string]int32{ + "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, + "PKCS1_RSA_PKCS1V5": 1, + "PKCS1_RSA_PSS": 2, + "PKIX_RSA_PKCS1V5": 3, + "PKIX_RSA_PSS": 4, + "PKIX_RSA_PKCS1V15_2048_SHA256": 9, + "PKIX_RSA_PKCS1V15_3072_SHA256": 10, + "PKIX_RSA_PKCS1V15_4096_SHA256": 11, + "PKIX_RSA_PSS_2048_SHA256": 16, + "PKIX_RSA_PSS_3072_SHA256": 17, + "PKIX_RSA_PSS_4096_SHA256": 18, + "PKIX_ECDSA_P256_HMAC_SHA_256": 6, + "PKIX_ECDSA_P256_SHA_256": 5, + "PKIX_ECDSA_P384_SHA_384": 12, + "PKIX_ECDSA_P521_SHA_512": 13, + "PKIX_ED25519": 7, + "PKIX_ED25519_PH": 8, + "LMS_SHA256": 14, + "LMOTS_SHA256": 15, + } +) + +func (x PublicKeyDetails) Enum() *PublicKeyDetails { + p := new(PublicKeyDetails) + *p = x + return p +} + +func (x PublicKeyDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PublicKeyDetails) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[1].Descriptor() +} + +func (PublicKeyDetails) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[1] +} + +func (x PublicKeyDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PublicKeyDetails.Descriptor instead. 
+func (PublicKeyDetails) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +type SubjectAlternativeNameType int32 + +const ( + SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED SubjectAlternativeNameType = 0 + SubjectAlternativeNameType_EMAIL SubjectAlternativeNameType = 1 + SubjectAlternativeNameType_URI SubjectAlternativeNameType = 2 + // OID 1.3.6.1.4.1.57264.1.7 + // See https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md#1361415726417--othername-san + // for more details. + SubjectAlternativeNameType_OTHER_NAME SubjectAlternativeNameType = 3 +) + +// Enum value maps for SubjectAlternativeNameType. +var ( + SubjectAlternativeNameType_name = map[int32]string{ + 0: "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "URI", + 3: "OTHER_NAME", + } + SubjectAlternativeNameType_value = map[string]int32{ + "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "URI": 2, + "OTHER_NAME": 3, + } +) + +func (x SubjectAlternativeNameType) Enum() *SubjectAlternativeNameType { + p := new(SubjectAlternativeNameType) + *p = x + return p +} + +func (x SubjectAlternativeNameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAlternativeNameType) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[2].Descriptor() +} + +func (SubjectAlternativeNameType) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[2] +} + +func (x SubjectAlternativeNameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAlternativeNameType.Descriptor instead. +func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +// HashOutput captures a digest of a 'message' (generic octet sequence) +// and the corresponding hash algorithm used. +type HashOutput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` + // This is the raw octets of the message digest as computed by + // the hash algorithm. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashOutput) Reset() { + *x = HashOutput{} + mi := &file_sigstore_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashOutput) ProtoMessage() {} + +func (x *HashOutput) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashOutput.ProtoReflect.Descriptor instead. 
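+
+// A minimal sketch of mapping HashAlgorithm onto Go's crypto.Hash (the SHA-3
+// constants only compute once an implementation such as
+// golang.org/x/crypto/sha3 is linked in; uses crypto and errors):
+//
+//	func cryptoHash(a HashAlgorithm) (crypto.Hash, error) {
+//		switch a {
+//		case HashAlgorithm_SHA2_256:
+//			return crypto.SHA256, nil
+//		case HashAlgorithm_SHA2_384:
+//			return crypto.SHA384, nil
+//		case HashAlgorithm_SHA2_512:
+//			return crypto.SHA512, nil
+//		case HashAlgorithm_SHA3_256:
+//			return crypto.SHA3_256, nil
+//		case HashAlgorithm_SHA3_384:
+//			return crypto.SHA3_384, nil
+//		default:
+//			return 0, errors.New("unspecified or unknown hash algorithm")
+//		}
+//	}
+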
+func (*HashOutput) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +func (x *HashOutput) GetAlgorithm() HashAlgorithm { + if x != nil { + return x.Algorithm + } + return HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED +} + +func (x *HashOutput) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// MessageSignature stores the computed signature over a message. +type MessageSignature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message digest can be used to identify the artifact. + // Clients MUST NOT attempt to use this digest to verify the associated + // signature; it is intended solely for identification. + MessageDigest *HashOutput `protobuf:"bytes,1,opt,name=message_digest,json=messageDigest,proto3" json:"message_digest,omitempty"` + // The raw bytes as returned from the signature algorithm. + // The signature algorithm (and so the format of the signature bytes) + // are determined by the contents of the 'verification_material', + // either a key-pair or a certificate. If using a certificate, the + // certificate contains the required information on the signature + // algorithm. + // When using a key pair, the algorithm MUST be part of the public + // key, which MUST be communicated out-of-band. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageSignature) Reset() { + *x = MessageSignature{} + mi := &file_sigstore_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageSignature) ProtoMessage() {} + +func (x *MessageSignature) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageSignature.ProtoReflect.Descriptor instead. +func (*MessageSignature) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageSignature) GetMessageDigest() *HashOutput { + if x != nil { + return x.MessageDigest + } + return nil +} + +func (x *MessageSignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +// LogId captures the identity of a transparency log. +type LogId struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The unique identity of the log, represented by its public key. + KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogId) Reset() { + *x = LogId{} + mi := &file_sigstore_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogId) ProtoMessage() {} + +func (x *LogId) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogId.ProtoReflect.Descriptor instead. 
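+
+// A minimal sketch of filling in these two messages (assuming the artifact
+// is in memory and rawSig came from a signer; uses crypto/sha256). Per the
+// comment above, the digest only identifies the artifact; it plays no part
+// in verifying the signature.
+//
+//	func newMessageSignature(artifact, rawSig []byte) *MessageSignature {
+//		digest := sha256.Sum256(artifact)
+//		return &MessageSignature{
+//			MessageDigest: &HashOutput{
+//				Algorithm: HashAlgorithm_SHA2_256,
+//				Digest:    digest[:],
+//			},
+//			Signature: rawSig,
+//		}
+//	}
+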
+func (*LogId) Descriptor() ([]byte, []int) {
+	return file_sigstore_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *LogId) GetKeyId() []byte {
+	if x != nil {
+		return x.KeyId
+	}
+	return nil
+}
+
+// This message holds an RFC 3161 timestamp.
+type RFC3161SignedTimestamp struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Signed timestamp is the DER-encoded TimeStampResponse.
+	// See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2
+	SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *RFC3161SignedTimestamp) Reset() {
+	*x = RFC3161SignedTimestamp{}
+	mi := &file_sigstore_common_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *RFC3161SignedTimestamp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RFC3161SignedTimestamp) ProtoMessage() {}
+
+func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_common_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RFC3161SignedTimestamp.ProtoReflect.Descriptor instead.
+func (*RFC3161SignedTimestamp) Descriptor() ([]byte, []int) {
+	return file_sigstore_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte {
+	if x != nil {
+		return x.SignedTimestamp
+	}
+	return nil
+}
+
+type PublicKey struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// DER-encoded public key, encoding method is specified by the
+	// key_details attribute.
+	RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"`
+	// Key encoding and signature algorithm to use for this key.
+	KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"`
+	// Optional validity period for this key, *inclusive* of the endpoints.
+	ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *PublicKey) Reset() {
+	*x = PublicKey{}
+	mi := &file_sigstore_common_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *PublicKey) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublicKey) ProtoMessage() {}
+
+func (x *PublicKey) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_common_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
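+
+// A minimal sketch of consuming a PublicKey (assuming one of the PKIX_*
+// key_details values, so raw_bytes is a DER-encoded SubjectPublicKeyInfo;
+// uses crypto/sha256, crypto/x509 and encoding/hex). The SHA-256 digest of
+// the same bytes is the RFC 6962 style identifier that LogId.key_id carries
+// and from which key hints are recommended to be derived; hex-encoding it is
+// just one possible presentation.
+//
+//	func parsePKIXKey(pk *PublicKey) (pub any, keyID string, err error) {
+//		pub, err = x509.ParsePKIXPublicKey(pk.GetRawBytes())
+//		if err != nil {
+//			return nil, "", err
+//		}
+//		sum := sha256.Sum256(pk.GetRawBytes())
+//		return pub, hex.EncodeToString(sum[:]), nil
+//	}
+
+// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.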
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{4} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +func (x *PublicKey) GetKeyDetails() PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED +} + +func (x *PublicKey) GetValidFor() *TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// PublicKeyIdentifier can be used to identify an (out of band) delivered +// key, to verify a signature. +type PublicKeyIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional unauthenticated hint on which key to use. + // The format of the hint must be agreed upon out of band by the + // signer and the verifiers, and so is not subject to this + // specification. + // Example use-case is to specify the public key to use, from a + // trusted key-ring. + // Implementors are RECOMMENDED to derive the value from the public + // key as described in RFC 6962. + // See: + Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKeyIdentifier) Reset() { + *x = PublicKeyIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKeyIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKeyIdentifier) ProtoMessage() {} + +func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKeyIdentifier.ProtoReflect.Descriptor instead. +func (*PublicKeyIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKeyIdentifier) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + +// An ASN.1 OBJECT IDENTIFIER +type ObjectIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{6} +} + +func (x *ObjectIdentifier) GetId() []int32 { + if x != nil { + return x.Id + } + return nil +} + +// An OID and the corresponding (byte) value. 
+type ObjectIdentifierValuePair struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifierValuePair) Reset() { + *x = ObjectIdentifierValuePair{} + mi := &file_sigstore_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifierValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifierValuePair) ProtoMessage() {} + +func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifierValuePair.ProtoReflect.Descriptor instead. +func (*ObjectIdentifierValuePair) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{7} +} + +func (x *ObjectIdentifierValuePair) GetOid() *ObjectIdentifier { + if x != nil { + return x.Oid + } + return nil +} + +func (x *ObjectIdentifierValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type DistinguishedName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` + CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DistinguishedName) Reset() { + *x = DistinguishedName{} + mi := &file_sigstore_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DistinguishedName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistinguishedName) ProtoMessage() {} + +func (x *DistinguishedName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistinguishedName.ProtoReflect.Descriptor instead. +func (*DistinguishedName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{8} +} + +func (x *DistinguishedName) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *DistinguishedName) GetCommonName() string { + if x != nil { + return x.CommonName + } + return "" +} + +type X509Certificate struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded X.509 certificate. 
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + mi := &file_sigstore_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. +func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{9} +} + +func (x *X509Certificate) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +type SubjectAlternativeName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` + // Types that are valid to be assigned to Identity: + // + // *SubjectAlternativeName_Regexp + // *SubjectAlternativeName_Value + Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SubjectAlternativeName) Reset() { + *x = SubjectAlternativeName{} + mi := &file_sigstore_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubjectAlternativeName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeName) ProtoMessage() {} + +func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeName.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{10} +} + +func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { + if x != nil { + return x.Type + } + return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED +} + +func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (x *SubjectAlternativeName) GetRegexp() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { + return x.Regexp + } + } + return "" +} + +func (x *SubjectAlternativeName) GetValue() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { + return x.Value + } + } + return "" +} + +type isSubjectAlternativeName_Identity interface { + isSubjectAlternativeName_Identity() +} + +type SubjectAlternativeName_Regexp struct { + // A regular expression describing the expected value for + // the SAN. 
+ Regexp string `protobuf:"bytes,2,opt,name=regexp,proto3,oneof"` +} + +type SubjectAlternativeName_Value struct { + // The exact value to match against. + Value string `protobuf:"bytes,3,opt,name=value,proto3,oneof"` +} + +func (*SubjectAlternativeName_Regexp) isSubjectAlternativeName_Identity() {} + +func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} + +// A collection of X.509 certificates. +// +// This "chain" can be used in multiple contexts, such as providing a root CA +// certificate within a TUF root of trust or multiple untrusted certificates for +// the purpose of chain building. +type X509CertificateChain struct { + state protoimpl.MessageState `protogen:"open.v1"` + // One or more DER-encoded certificates. + // + // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence + // has an imposed order. Unless explicitly specified, there is otherwise no + // guaranteed order. + Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509CertificateChain) Reset() { + *x = X509CertificateChain{} + mi := &file_sigstore_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509CertificateChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509CertificateChain) ProtoMessage() {} + +func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509CertificateChain.ProtoReflect.Descriptor instead. +func (*X509CertificateChain) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{11} +} + +func (x *X509CertificateChain) GetCertificates() []*X509Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +// The time range is closed and includes both the start and end times, +// (i.e., [start, end]). +// End is optional to be able to capture a period that has started but +// has no known end. +type TimeRange struct { + state protoimpl.MessageState `protogen:"open.v1"` + Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimeRange) Reset() { + *x = TimeRange{} + mi := &file_sigstore_common_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimeRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeRange) ProtoMessage() {} + +func (x *TimeRange) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeRange.ProtoReflect.Descriptor instead. 
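+
+// A minimal sketch of the inclusive containment check described above
+// (assuming a nil end means the period has no known end yet; uses time):
+//
+//	func inRange(tr *TimeRange, t time.Time) bool {
+//		if tr == nil || tr.Start == nil {
+//			return false
+//		}
+//		if t.Before(tr.Start.AsTime()) {
+//			return false
+//		}
+//		if tr.End != nil && t.After(tr.End.AsTime()) {
+//			return false
+//		}
+//		return true
+//	}
+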
+func (*TimeRange) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{12} +} + +func (x *TimeRange) GetStart() *timestamppb.Timestamp { + if x != nil { + return x.Start + } + return nil +} + +func (x *TimeRange) GetEnd() *timestamppb.Timestamp { + if x != nil { + return x.End + } + return nil +} + +var File_sigstore_common_proto protoreflect.FileDescriptor + +var file_sigstore_common_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, + 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, + 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, + 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 
0x88, 0x01, 0x01, 0x12, 0x49, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, + 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, + 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, + 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, + 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 
0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, + 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, + 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, + 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, + 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, + 0x34, 0x10, 0x05, 0x2a, 0xa7, 0x04, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, + 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, + 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, + 0x5f, 0x52, 0x53, 
0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, + 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, + 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x22, 0x04, 0x08, 0x13, 0x10, 0x32, 0x2a, 0x6f, 0x0a, + 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, + 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, + 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, + 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, + 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, + 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 
0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, + 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_common_proto_rawDescOnce sync.Once + file_sigstore_common_proto_rawDescData = file_sigstore_common_proto_rawDesc +) + +func file_sigstore_common_proto_rawDescGZIP() []byte { + file_sigstore_common_proto_rawDescOnce.Do(func() { + file_sigstore_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_common_proto_rawDescData) + }) + return file_sigstore_common_proto_rawDescData +} + +var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sigstore_common_proto_goTypes = []any{ + (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm + (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails + (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType + (*HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput + (*MessageSignature)(nil), // 4: dev.sigstore.common.v1.MessageSignature + (*LogId)(nil), // 5: dev.sigstore.common.v1.LogId + (*RFC3161SignedTimestamp)(nil), // 6: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*PublicKey)(nil), // 7: dev.sigstore.common.v1.PublicKey + (*PublicKeyIdentifier)(nil), // 8: dev.sigstore.common.v1.PublicKeyIdentifier + (*ObjectIdentifier)(nil), // 9: dev.sigstore.common.v1.ObjectIdentifier + (*ObjectIdentifierValuePair)(nil), // 10: dev.sigstore.common.v1.ObjectIdentifierValuePair + (*DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*X509Certificate)(nil), // 12: dev.sigstore.common.v1.X509Certificate + (*SubjectAlternativeName)(nil), // 13: dev.sigstore.common.v1.SubjectAlternativeName + (*X509CertificateChain)(nil), // 14: dev.sigstore.common.v1.X509CertificateChain + (*TimeRange)(nil), // 15: dev.sigstore.common.v1.TimeRange + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sigstore_common_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.common.v1.HashOutput.algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 3, // 1: dev.sigstore.common.v1.MessageSignature.message_digest:type_name -> dev.sigstore.common.v1.HashOutput + 1, // 2: dev.sigstore.common.v1.PublicKey.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 15, // 3: dev.sigstore.common.v1.PublicKey.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 9, // 4: dev.sigstore.common.v1.ObjectIdentifierValuePair.oid:type_name -> dev.sigstore.common.v1.ObjectIdentifier + 2, // 5: dev.sigstore.common.v1.SubjectAlternativeName.type:type_name -> dev.sigstore.common.v1.SubjectAlternativeNameType + 12, // 6: dev.sigstore.common.v1.X509CertificateChain.certificates:type_name -> dev.sigstore.common.v1.X509Certificate + 16, // 7: dev.sigstore.common.v1.TimeRange.start:type_name -> google.protobuf.Timestamp + 16, // 8: dev.sigstore.common.v1.TimeRange.end:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the 
sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_common_proto_init() } +func file_sigstore_common_proto_init() { + if File_sigstore_common_proto != nil { + return + } + file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} + file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ + (*SubjectAlternativeName_Regexp)(nil), + (*SubjectAlternativeName_Value)(nil), + } + file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_common_proto_rawDesc, + NumEnums: 3, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_common_proto_goTypes, + DependencyIndexes: file_sigstore_common_proto_depIdxs, + EnumInfos: file_sigstore_common_proto_enumTypes, + MessageInfos: file_sigstore_common_proto_msgTypes, + }.Build() + File_sigstore_common_proto = out.File + file_sigstore_common_proto_rawDesc = nil + file_sigstore_common_proto_goTypes = nil + file_sigstore_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go new file mode 100644 index 00000000..16e581eb --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -0,0 +1,241 @@ +// https://raw.githubusercontent.com/secure-systems-lab/dsse/9c813476bd36de70a5738c72e784f123ecea16af/envelope.proto + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.3 +// protoc v5.29.3 +// source: envelope.proto + +package dsse + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An authenticated message of arbitrary type. +type Envelope struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message to be signed. (In JSON, this is encoded as base64.) + // REQUIRED. + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + // String unambiguously identifying how to interpret payload. + // REQUIRED. 
+ PayloadType string `protobuf:"bytes,2,opt,name=payloadType,proto3" json:"payloadType,omitempty"` + // Signature over: + // + // PAE(type, payload) + // + // Where PAE is defined as: + // PAE(type, payload) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload + // + = concatenation + // SP = ASCII space [0x20] + // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] + // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros + // REQUIRED (length >= 1). + Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Envelope) Reset() { + *x = Envelope{} + mi := &file_envelope_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{0} +} + +func (x *Envelope) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Envelope) GetPayloadType() string { + if x != nil { + return x.PayloadType + } + return "" +} + +func (x *Envelope) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signature itself. (In JSON, this is encoded as base64.) + // REQUIRED. + Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` + // *Unauthenticated* hint identifying which public key was used. + // OPTIONAL. + Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_envelope_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
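+
+// A direct transcription of the PAE definition above into Go (fmt's %d
+// prints the decimal byte length with no leading zeros, matching LEN):
+//
+//	func pae(payloadType string, payload []byte) []byte {
+//		return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+//			len(payloadType), payloadType, len(payload), payload))
+//	}
+//
+// Each Signature.sig in the envelope is computed over pae(payloadType,
+// payload), never over the bare payload.
+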
+func (*Signature) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{1} +} + +func (x *Signature) GetSig() []byte { + if x != nil { + return x.Sig + } + return nil +} + +func (x *Signature) GetKeyid() string { + if x != nil { + return x.Keyid + } + return "" +} + +var File_envelope_proto protoreflect.FileDescriptor + +var file_envelope_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x45, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x65, 0x79, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x69, 0x64, 0x42, 0x44, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, + 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x64, + 0x73, 0x73, 0x65, 0xea, 0x02, 0x0e, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x44, 0x53, 0x53, 0x45, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envelope_proto_rawDescOnce sync.Once + file_envelope_proto_rawDescData = file_envelope_proto_rawDesc +) + +func file_envelope_proto_rawDescGZIP() []byte { + file_envelope_proto_rawDescOnce.Do(func() { + file_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(file_envelope_proto_rawDescData) + }) + return file_envelope_proto_rawDescData +} + +var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envelope_proto_goTypes = []any{ + (*Envelope)(nil), // 0: io.intoto.Envelope + (*Signature)(nil), // 1: io.intoto.Signature +} +var file_envelope_proto_depIdxs = []int32{ + 1, // 0: io.intoto.Envelope.signatures:type_name -> io.intoto.Signature + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envelope_proto_init() } +func file_envelope_proto_init() { + if File_envelope_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envelope_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envelope_proto_goTypes, + DependencyIndexes: 
file_envelope_proto_depIdxs, + MessageInfos: file_envelope_proto_msgTypes, + }.Build() + File_envelope_proto = out.File + file_envelope_proto_rawDesc = nil + file_envelope_proto_goTypes = nil + file_envelope_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go new file mode 100644 index 00000000..5874bc29 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go @@ -0,0 +1,560 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.3 +// protoc v5.29.3 +// source: sigstore_rekor.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// KindVersion contains the entry's kind and api version. +type KindVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Kind is the type of entry being stored in the log. + // See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The specific api version of the type. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KindVersion) Reset() { + *x = KindVersion{} + mi := &file_sigstore_rekor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KindVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KindVersion) ProtoMessage() {} + +func (x *KindVersion) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KindVersion.ProtoReflect.Descriptor instead. +func (*KindVersion) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{0} +} + +func (x *KindVersion) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *KindVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// The checkpoint MUST contain an origin string as a unique log identifier, +// the tree size, and the root hash. 
It MAY also be followed by optional data,
+// and clients MUST NOT assume that optional data is present. The checkpoint MUST also contain
+// a signature over the root hash (tree head). The checkpoint MAY contain additional
+// signatures, but the first SHOULD be the signature from the log. Checkpoint contents
+// are concatenated with newlines into a single string.
+// The checkpoint format is described in
+// https://github.com/transparency-dev/formats/blob/main/log/README.md
+// and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md.
+// An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go
+type Checkpoint struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *Checkpoint) Reset() {
+	*x = Checkpoint{}
+	mi := &file_sigstore_rekor_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Checkpoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Checkpoint) ProtoMessage() {}
+
+func (x *Checkpoint) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Checkpoint.ProtoReflect.Descriptor instead.
+func (*Checkpoint) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Checkpoint) GetEnvelope() string {
+	if x != nil {
+		return x.Envelope
+	}
+	return ""
+}
+
+// InclusionProof is the proof returned from the transparency log. It can
+// be used for offline or online verification against the log.
+type InclusionProof struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The index of the entry in the tree it was written to.
+	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+	// The hash digest stored at the root of the merkle tree at the time
+	// the proof was generated.
+	RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"`
+	// The size of the merkle tree at the time the proof was generated.
+	TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
+	// A list of hashes required to compute the inclusion proof, sorted
+	// in order from leaf to root.
+	// Note that leaf and root hashes are not included.
+	// The root hash is available separately in this message, and the
+	// leaf hash should be calculated by the client.
+	Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"`
+	// Signature of the tree head, as of the time this proof was
+	// generated. See above info on 'Checkpoint' for more details.
+	Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *InclusionProof) Reset() {
+	*x = InclusionProof{}
+	mi := &file_sigstore_rekor_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionProof) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionProof) ProtoMessage() {}
+
+func (x *InclusionProof) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InclusionProof.ProtoReflect.Descriptor instead.
+func (*InclusionProof) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *InclusionProof) GetLogIndex() int64 {
+	if x != nil {
+		return x.LogIndex
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetRootHash() []byte {
+	if x != nil {
+		return x.RootHash
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetTreeSize() int64 {
+	if x != nil {
+		return x.TreeSize
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetHashes() [][]byte {
+	if x != nil {
+		return x.Hashes
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetCheckpoint() *Checkpoint {
+	if x != nil {
+		return x.Checkpoint
+	}
+	return nil
+}
+
+// The inclusion promise is calculated by Rekor as a signature over a
+// canonical JSON serialization of the persisted entry, the log ID, the
+// log index and the integration timestamp.
+// See https://github.com/sigstore/rekor/blob/a6e58f72b6b18cc06cefe61808efd562b9726330/pkg/api/entries.go#L54
+// The format of the signature depends on the transparency log's public key.
+// If the signature algorithm requires a hash function and/or a signature
+// scheme (e.g. RSA), these have to be retrieved out-of-band from the log's
+// operators, together with the public key.
+// This is used to verify the integration timestamp's value and that the log
+// has promised to include the entry.
+type InclusionPromise struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *InclusionPromise) Reset() {
+	*x = InclusionPromise{}
+	mi := &file_sigstore_rekor_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionPromise) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionPromise) ProtoMessage() {}
+
+func (x *InclusionPromise) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
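+
+// A minimal offline verification sketch for InclusionProof, assuming the log
+// hashes entries RFC 6962 style (SHA-256 with 0x00 leaf and 0x01 node
+// prefixes), as Rekor's public instance does. The loop follows the algorithm
+// from RFC 9162, section 2.1.3.2 (uses bytes, crypto/sha256 and errors); the
+// recomputed root must match root_hash, and the checkpoint signature over
+// that root must still be verified separately.
+//
+//	func leafHash(leaf []byte) []byte {
+//		h := sha256.Sum256(append([]byte{0x00}, leaf...))
+//		return h[:]
+//	}
+//
+//	func nodeHash(left, right []byte) []byte {
+//		h := sha256.New()
+//		h.Write([]byte{0x01})
+//		h.Write(left)
+//		h.Write(right)
+//		return h.Sum(nil)
+//	}
+//
+//	func verifyInclusion(p *InclusionProof, leaf []byte) error {
+//		if p.GetLogIndex() < 0 || p.GetTreeSize() <= p.GetLogIndex() {
+//			return errors.New("log index outside tree")
+//		}
+//		fn, sn := uint64(p.GetLogIndex()), uint64(p.GetTreeSize()-1)
+//		res := leafHash(leaf)
+//		for _, h := range p.GetHashes() {
+//			if sn == 0 {
+//				return errors.New("proof too long")
+//			}
+//			if fn%2 == 1 || fn == sn {
+//				res = nodeHash(h, res)
+//				for fn%2 == 0 && fn != 0 {
+//					fn >>= 1
+//					sn >>= 1
+//				}
+//			} else {
+//				res = nodeHash(res, h)
+//			}
+//			fn >>= 1
+//			sn >>= 1
+//		}
+//		if sn != 0 || !bytes.Equal(res, p.GetRootHash()) {
+//			return errors.New("inclusion proof does not match root hash")
+//		}
+//		return nil
+//	}
+
+// Deprecated: Use InclusionPromise.ProtoReflect.Descriptor instead.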
+func (*InclusionPromise) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *InclusionPromise) GetSignedEntryTimestamp() []byte {
+	if x != nil {
+		return x.SignedEntryTimestamp
+	}
+	return nil
+}
+
+// TransparencyLogEntry captures all the details required from Rekor to
+// reconstruct an entry, given that the payload is provided via other means.
+// This type can easily be created from the existing response from Rekor.
+// Future iterations could rely on Rekor returning the minimal set of
+// attributes (excluding the payload) that are required for verifying the
+// inclusion promise. The inclusion promise (called SignedEntryTimestamp in
+// the response from Rekor) is similar to a Signed Certificate Timestamp
+// as described in https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2.
+type TransparencyLogEntry struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The global index of the entry, used when querying the log by index.
+	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+	// The unique identifier of the log.
+	LogId *v1.LogId `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+	// The kind (type) and version of the object associated with this
+	// entry. These values are required to construct the entry during
+	// verification.
+	KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"`
+	// The UNIX timestamp from the log when the entry was persisted.
+	// The integration time MUST NOT be trusted if inclusion_promise
+	// is omitted.
+	IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"`
+	// The inclusion promise/signed entry timestamp from the log.
+	// Required for v0.1 bundles, and MUST be verified.
+	// Optional for >= v0.2 bundles if another suitable source of
+	// time is present (such as another source of signed time,
+	// or the current system time for long-lived certificates).
+	// MUST be verified if no other suitable source of time is present,
+	// and SHOULD be verified otherwise.
+	InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"`
+	// The inclusion proof can be used for offline or online verification
+	// that the entry was appended to the log, and that the log has not been
+	// altered.
+	InclusionProof *InclusionProof `protobuf:"bytes,6,opt,name=inclusion_proof,json=inclusionProof,proto3" json:"inclusion_proof,omitempty"`
+	// Optional. The canonicalized transparency log entry, used to
+	// reconstruct the Signed Entry Timestamp (SET) during verification.
+	// The contents of this field are the same as the `body` field in
+	// a Rekor response, meaning that it does **not** include the "full"
+	// canonicalized form (of log index, ID, etc.), which are
+	// exposed as separate fields. The verifier is responsible for
+	// combining the `canonicalized_body`, `log_index`, `log_id`,
+	// and `integrated_time` into the payload that the SET's signature
+	// is generated over.
+	// This field is intended to be used in cases where the SET cannot be
+	// produced deterministically (e.g. inconsistent JSON field ordering,
+	// differing whitespace, etc.).
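+	//
+	// As an editor's illustrative sketch (all values hypothetical), the
+	// payload the verifier reconstructs takes the shape of Rekor's
+	// LogEntryAnon, canonicalized as JSON:
+	//
+	//	{"body":"<canonicalized_body, base64>","integratedTime":1689107542,
+	//	"logID":"<hex-encoded log_id>","logIndex":42}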
+ // + // If set, clients MUST verify that the signature referenced in the + // `canonicalized_body` matches the signature provided in the + // `Bundle.content`. + // If not set, clients are responsible for constructing an equivalent + // payload from other sources to verify the signature. + CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransparencyLogEntry) Reset() { + *x = TransparencyLogEntry{} + mi := &file_sigstore_rekor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransparencyLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogEntry) ProtoMessage() {} + +func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogEntry.ProtoReflect.Descriptor instead. +func (*TransparencyLogEntry) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{4} +} + +func (x *TransparencyLogEntry) GetLogIndex() int64 { + if x != nil { + return x.LogIndex + } + return 0 +} + +func (x *TransparencyLogEntry) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogEntry) GetKindVersion() *KindVersion { + if x != nil { + return x.KindVersion + } + return nil +} + +func (x *TransparencyLogEntry) GetIntegratedTime() int64 { + if x != nil { + return x.IntegratedTime + } + return 0 +} + +func (x *TransparencyLogEntry) GetInclusionPromise() *InclusionPromise { + if x != nil { + return x.InclusionPromise + } + return nil +} + +func (x *TransparencyLogEntry) GetInclusionProof() *InclusionProof { + if x != nil { + return x.InclusionProof + } + return nil +} + +func (x *TransparencyLogEntry) GetCanonicalizedBody() []byte { + if x != nil { + return x.CanonicalizedBody + } + return nil +} + +var File_sigstore_rekor_proto protoreflect.FileDescriptor + +var file_sigstore_rekor_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x45, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1d, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x0a, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x20, + 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x20, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x10, 0x49, 0x6e, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x16, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xc7, 0x03, 0x0a, 0x14, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x20, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x39, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, + 0x49, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x4a, + 0x0a, 0x0c, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6b, + 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 
0x6d, 0x69, 0x73, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x10, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6f, + 0x64, 0x79, 0x42, 0x78, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_rekor_proto_rawDescOnce sync.Once + file_sigstore_rekor_proto_rawDescData = file_sigstore_rekor_proto_rawDesc +) + +func file_sigstore_rekor_proto_rawDescGZIP() []byte { + file_sigstore_rekor_proto_rawDescOnce.Do(func() { + file_sigstore_rekor_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_rekor_proto_rawDescData) + }) + return file_sigstore_rekor_proto_rawDescData +} + +var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_rekor_proto_goTypes = []any{ + (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion + (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint + (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof + (*InclusionPromise)(nil), // 3: dev.sigstore.rekor.v1.InclusionPromise + (*TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.LogId)(nil), // 5: dev.sigstore.common.v1.LogId +} +var file_sigstore_rekor_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v1.InclusionProof.checkpoint:type_name -> dev.sigstore.rekor.v1.Checkpoint + 5, // 1: dev.sigstore.rekor.v1.TransparencyLogEntry.log_id:type_name -> dev.sigstore.common.v1.LogId + 0, // 2: dev.sigstore.rekor.v1.TransparencyLogEntry.kind_version:type_name -> dev.sigstore.rekor.v1.KindVersion + 3, // 3: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_promise:type_name -> dev.sigstore.rekor.v1.InclusionPromise + 2, // 4: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_proof:type_name -> 
dev.sigstore.rekor.v1.InclusionProof
+	5, // [5:5] is the sub-list for method output_type
+	5, // [5:5] is the sub-list for method input_type
+	5, // [5:5] is the sub-list for extension type_name
+	5, // [5:5] is the sub-list for extension extendee
+	0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_sigstore_rekor_proto_init() }
+func file_sigstore_rekor_proto_init() {
+	if File_sigstore_rekor_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_sigstore_rekor_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sigstore_rekor_proto_goTypes,
+		DependencyIndexes: file_sigstore_rekor_proto_depIdxs,
+		MessageInfos:      file_sigstore_rekor_proto_msgTypes,
+	}.Build()
+	File_sigstore_rekor_proto = out.File
+	file_sigstore_rekor_proto_rawDesc = nil
+	file_sigstore_rekor_proto_goTypes = nil
+	file_sigstore_rekor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
new file mode 100644
index 00000000..8888d385
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
@@ -0,0 +1,699 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.3
+// 	protoc        v5.29.3
+// source: sigstore_trustroot.proto
+
+package v1
+
+import (
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// TransparencyLogInstance describes the immutable parameters from a
+// transparency log.
+// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters
+// for more details.
+// The included parameters are the minimal set required to identify a log,
+// and verify an inclusion proof/promise.
+type TransparencyLogInstance struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The base URL of the log, which clients can use to construct
+	// request URLs.
+	BaseUrl string `protobuf:"bytes,1,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"`
+	// The hash algorithm used for the Merkle Tree.
+	HashAlgorithm v1.HashAlgorithm `protobuf:"varint,2,opt,name=hash_algorithm,json=hashAlgorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"hash_algorithm,omitempty"`
+	// The public key used to verify signatures generated by the log.
+	// This attribute contains the signature algorithm used by the log.
+	PublicKey *v1.PublicKey `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
+	// The unique identifier for this transparency log.
+	// Represented as the SHA-256 hash of the log's public key,
+	// calculated over the DER encoding of the key represented as
+	// SubjectPublicKeyInfo.
+	// See https://www.rfc-editor.org/rfc/rfc6962#section-3.2
+	LogId *v1.LogId `protobuf:"bytes,4,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+	// The checkpoint key identifier for the log used in a checkpoint.
+	// Optional, not provided for logs that do not generate checkpoints.
+	// For logs that do generate checkpoints, if not set, assume
+	// log_id equals checkpoint_key_id.
+	// Follows the specification described here
+	// for ECDSA and Ed25519 signatures:
+	// https://github.com/C2SP/C2SP/blob/main/signed-note.md#signatures
+	// For RSA signatures, the key ID will match the ECDSA format, the
+	// hashed DER-encoded SPKI public key. Publicly witnessed logs MUST NOT
+	// use RSA-signed checkpoints, since witnesses do not support
+	// RSA signatures.
+	// This is provided for convenience. Clients can also calculate the
+	// checkpoint key ID given the log's public key.
+	// SHOULD be set for logs generating Ed25519 signatures.
+	// SHOULD be 4 bytes long, as a truncated hash.
+	CheckpointKeyId *v1.LogId `protobuf:"bytes,5,opt,name=checkpoint_key_id,json=checkpointKeyId,proto3" json:"checkpoint_key_id,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *TransparencyLogInstance) Reset() {
+	*x = TransparencyLogInstance{}
+	mi := &file_sigstore_trustroot_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *TransparencyLogInstance) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransparencyLogInstance) ProtoMessage() {}
+
+func (x *TransparencyLogInstance) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransparencyLogInstance.ProtoReflect.Descriptor instead.
+func (*TransparencyLogInstance) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TransparencyLogInstance) GetBaseUrl() string {
+	if x != nil {
+		return x.BaseUrl
+	}
+	return ""
+}
+
+func (x *TransparencyLogInstance) GetHashAlgorithm() v1.HashAlgorithm {
+	if x != nil {
+		return x.HashAlgorithm
+	}
+	return v1.HashAlgorithm(0)
+}
+
+func (x *TransparencyLogInstance) GetPublicKey() *v1.PublicKey {
+	if x != nil {
+		return x.PublicKey
+	}
+	return nil
+}
+
+func (x *TransparencyLogInstance) GetLogId() *v1.LogId {
+	if x != nil {
+		return x.LogId
+	}
+	return nil
+}
+
+func (x *TransparencyLogInstance) GetCheckpointKeyId() *v1.LogId {
+	if x != nil {
+		return x.CheckpointKeyId
+	}
+	return nil
+}
+
+// CertificateAuthority captures the information required to identify which
+// CA to use and to perform signature verification.
+type CertificateAuthority struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The root certificate MUST be self-signed, and so the subject and
+	// issuer are the same.
+	Subject *v1.DistinguishedName `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// The URI identifies the certificate authority.
+	//
+	// It is RECOMMENDED that the URI is the base URL for the certificate
+	// authority, which can be passed to any SDK/client provided
+	// by the certificate authority to interact with the certificate
+	// authority.
+	Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
+	// The certificate chain for this CA. The last certificate in the chain
+	// MUST be the trust anchor. The trust anchor MAY be a self-signed root
+	// CA certificate or MAY be an intermediate CA certificate.
+	CertChain *v1.X509CertificateChain `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+	// The time range during which the *entire* chain was valid. This is at
+	// most the longest interval when *all* certificates in the chain were
+	// valid, but it MAY be shorter. Clients MUST check timestamps against
+	// *both* the `valid_for` time range *and* the entire certificate chain.
+	//
+	// The TimeRange should be considered valid *inclusive* of the
+	// endpoints.
+	ValidFor *v1.TimeRange `protobuf:"bytes,4,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *CertificateAuthority) Reset() {
+	*x = CertificateAuthority{}
+	mi := &file_sigstore_trustroot_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *CertificateAuthority) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateAuthority) ProtoMessage() {}
+
+func (x *CertificateAuthority) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateAuthority.ProtoReflect.Descriptor instead.
+func (*CertificateAuthority) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CertificateAuthority) GetSubject() *v1.DistinguishedName {
+	if x != nil {
+		return x.Subject
+	}
+	return nil
+}
+
+func (x *CertificateAuthority) GetUri() string {
+	if x != nil {
+		return x.Uri
+	}
+	return ""
+}
+
+func (x *CertificateAuthority) GetCertChain() *v1.X509CertificateChain {
+	if x != nil {
+		return x.CertChain
+	}
+	return nil
+}
+
+func (x *CertificateAuthority) GetValidFor() *v1.TimeRange {
+	if x != nil {
+		return x.ValidFor
+	}
+	return nil
+}
+
+// TrustedRoot describes the client's complete set of trusted entities.
+// How the TrustedRoot is populated is not specified, but can be a
+// combination of many sources such as TUF repositories, files on disk, etc.
+//
+// The TrustedRoot is not meant to be used for any artifact verification, only
+// to capture the complete/global set of trusted verification materials.
+// When verifying an artifact, based on the artifact and policies, a selection
+// of keys/authorities is expected to be extracted and provided to the
+// verification function.
This way the set of keys/authorities can be kept to
+// a minimal set by the policy, to gain better control over which signatures
+// are allowed.
+//
+// The embedded transparency logs, CT logs, CAs and TSAs MUST include any
+// previously used instance -- otherwise signatures made in the past cannot
+// be verified.
+//
+// All the listed instances SHOULD be sorted by the 'valid_for' in ascending
+// order, that is, the oldest instance first. Only the last instance is
+// allowed to have its 'end' timestamp unset. All previous instances MUST
+// have a closed interval of validity. The last instance MAY have a closed
+// interval. Clients MUST accept instances that overlap in time; otherwise
+// clients may experience problems during rotations of verification
+// materials.
+//
+// To be able to manage planned rotations of either transparency logs or
+// certificate authorities, clients MUST accept lists of instances where
+// the last instance has a 'valid_for' that extends into the future.
+// This should not be a problem, as clients SHOULD first seek the trust root
+// for a suitable instance before creating a per-artifact trust root (that
+// is, a subset of the complete trust root) that is used for verification.
+type TrustedRoot struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// MUST be application/vnd.dev.sigstore.trustedroot.v0.1+json
+	// when encoded as JSON.
+	// Clients MUST be able to process and parse content with the media
+	// type defined in the old format:
+	// application/vnd.dev.sigstore.trustedroot+json;version=0.1
+	MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	// A set of trusted Rekor servers.
+	Tlogs []*TransparencyLogInstance `protobuf:"bytes,2,rep,name=tlogs,proto3" json:"tlogs,omitempty"`
+	// A set of trusted certificate authorities (e.g. Fulcio), and any
+	// intermediate certificates they provide.
+	// If a CA issues multiple intermediate certificates, each
+	// combination shall be represented as a separate chain. I.e., a single
+	// root cert may appear in multiple chains, but with different
+	// intermediate and/or leaf certificates.
+	// The certificates are intended to be used for verifying artifact
+	// signatures.
+	CertificateAuthorities []*CertificateAuthority `protobuf:"bytes,3,rep,name=certificate_authorities,json=certificateAuthorities,proto3" json:"certificate_authorities,omitempty"`
+	// A set of trusted certificate transparency logs.
+	Ctlogs []*TransparencyLogInstance `protobuf:"bytes,4,rep,name=ctlogs,proto3" json:"ctlogs,omitempty"`
+	// A set of trusted timestamping authorities.
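+	// An editor's illustrative sketch of how a verifier might pick the
+	// instance that was valid at a given signing time, per the 'valid_for'
+	// guidance above (inRange is a hypothetical helper over *v1.TimeRange):
+	//
+	//	for _, tsa := range root.GetTimestampAuthorities() {
+	//		if inRange(tsa.GetValidFor(), signingTime) {
+	//			return tsa
+	//		}
+	//	}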
+	TimestampAuthorities []*CertificateAuthority `protobuf:"bytes,5,rep,name=timestamp_authorities,json=timestampAuthorities,proto3" json:"timestamp_authorities,omitempty"`
+	unknownFields        protoimpl.UnknownFields
+	sizeCache            protoimpl.SizeCache
+}
+
+func (x *TrustedRoot) Reset() {
+	*x = TrustedRoot{}
+	mi := &file_sigstore_trustroot_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *TrustedRoot) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TrustedRoot) ProtoMessage() {}
+
+func (x *TrustedRoot) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TrustedRoot.ProtoReflect.Descriptor instead.
+func (*TrustedRoot) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TrustedRoot) GetMediaType() string {
+	if x != nil {
+		return x.MediaType
+	}
+	return ""
+}
+
+func (x *TrustedRoot) GetTlogs() []*TransparencyLogInstance {
+	if x != nil {
+		return x.Tlogs
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetCertificateAuthorities() []*CertificateAuthority {
+	if x != nil {
+		return x.CertificateAuthorities
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetCtlogs() []*TransparencyLogInstance {
+	if x != nil {
+		return x.Ctlogs
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetTimestampAuthorities() []*CertificateAuthority {
+	if x != nil {
+		return x.TimestampAuthorities
+	}
+	return nil
+}
+
+// SigningConfig represents the trusted entities/state needed by Sigstore
+// signing. In particular, it primarily contains service URLs that a Sigstore
+// signer may need to connect to for the online aspects of signing.
+type SigningConfig struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// MUST be application/vnd.dev.sigstore.signingconfig.v0.1+json
+	MediaType string `protobuf:"bytes,5,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	// A URL to a Fulcio-compatible CA, capable of receiving
+	// Certificate Signing Requests (CSRs) and responding with
+	// issued certificates.
+	//
+	// This URL **MUST** be the "base" URL for the CA, which clients
+	// should construct an appropriate CSR endpoint on top of.
+	// For example, if `ca_url` is `https://example.com/ca`, then
+	// the client **MAY** construct the CSR endpoint as
+	// `https://example.com/ca/api/v2/signingCert`.
+	CaUrl string `protobuf:"bytes,1,opt,name=ca_url,json=caUrl,proto3" json:"ca_url,omitempty"`
+	// A URL to an OpenID Connect identity provider.
+	//
+	// This URL **MUST** be the "base" URL for the OIDC IdP, which clients
+	// should perform well-known OpenID Connect discovery against.
+	OidcUrl string `protobuf:"bytes,2,opt,name=oidc_url,json=oidcUrl,proto3" json:"oidc_url,omitempty"`
+	// One or more URLs to Rekor-compatible transparency logs.
+	//
+	// Each URL **MUST** be the "base" URL for the transparency log,
+	// which clients should construct appropriate API endpoints on top of.
+	TlogUrls []string `protobuf:"bytes,3,rep,name=tlog_urls,json=tlogUrls,proto3" json:"tlog_urls,omitempty"`
+	// One or more URLs to RFC 3161 Time Stamping Authorities (TSAs).
+	//
+	// Each URL **MUST** be the **full** URL for the TSA, meaning that it
+	// should be suitable for submitting Time Stamp Requests (TSRs)
+	// via HTTP, per RFC 3161.
+	TsaUrls []string `protobuf:"bytes,4,rep,name=tsa_urls,json=tsaUrls,proto3" json:"tsa_urls,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SigningConfig) Reset() {
+	*x = SigningConfig{}
+	mi := &file_sigstore_trustroot_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SigningConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SigningConfig) ProtoMessage() {}
+
+func (x *SigningConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SigningConfig.ProtoReflect.Descriptor instead.
+func (*SigningConfig) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SigningConfig) GetMediaType() string {
+	if x != nil {
+		return x.MediaType
+	}
+	return ""
+}
+
+func (x *SigningConfig) GetCaUrl() string {
+	if x != nil {
+		return x.CaUrl
+	}
+	return ""
+}
+
+func (x *SigningConfig) GetOidcUrl() string {
+	if x != nil {
+		return x.OidcUrl
+	}
+	return ""
+}
+
+func (x *SigningConfig) GetTlogUrls() []string {
+	if x != nil {
+		return x.TlogUrls
+	}
+	return nil
+}
+
+func (x *SigningConfig) GetTsaUrls() []string {
+	if x != nil {
+		return x.TsaUrls
+	}
+	return nil
+}
+
+// ClientTrustConfig describes the complete state needed by a client
+// to perform both signing and verification operations against a particular
+// instance of Sigstore.
+type ClientTrustConfig struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// MUST be application/vnd.dev.sigstore.clienttrustconfig.v0.1+json
+	MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	// The root of trust, which MUST be present.
+	TrustedRoot *TrustedRoot `protobuf:"bytes,2,opt,name=trusted_root,json=trustedRoot,proto3" json:"trusted_root,omitempty"`
+	// Configuration for signing clients, which MUST be present.
+	SigningConfig *SigningConfig `protobuf:"bytes,3,opt,name=signing_config,json=signingConfig,proto3" json:"signing_config,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ClientTrustConfig) Reset() {
+	*x = ClientTrustConfig{}
+	mi := &file_sigstore_trustroot_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ClientTrustConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientTrustConfig) ProtoMessage() {}
+
+func (x *ClientTrustConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientTrustConfig.ProtoReflect.Descriptor instead.
+func (*ClientTrustConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{4} +} + +func (x *ClientTrustConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ClientTrustConfig) GetTrustedRoot() *TrustedRoot { + if x != nil { + return x.TrustedRoot + } + return nil +} + +func (x *ClientTrustConfig) GetSigningConfig() *SigningConfig { + if x != nil { + return x.SigningConfig + } + return nil +} + +var File_sigstore_trustroot_proto protoreflect.FileDescriptor + +var file_sigstore_trustroot_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x02, + 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x73, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x4c, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x49, 0x64, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x6f, 0x67, 0x49, 0x64, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 
0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, + 0x6f, 0x72, 0x22, 0x92, 0x03, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x48, 0x0a, 0x05, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x16, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, + 0x73, 0x12, 0x64, 0x0a, 0x15, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x63, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x61, 0x55, 0x72, 0x6c, 0x12, + 0x19, 0x0a, 0x08, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x69, 0x64, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6c, + 0x6f, 0x67, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6c, 0x6f, 0x67, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x73, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x73, 0x61, 0x55, 0x72, + 0x6c, 0x73, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, + 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x88, 0x01, + 0x0a, 0x1f, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x42, 0x0e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, + 0x6f, 0x2f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2f, 0x76, 0x31, 0xea, 0x02, + 0x17, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x52, 0x6f, 0x6f, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_trustroot_proto_rawDescOnce sync.Once + file_sigstore_trustroot_proto_rawDescData = file_sigstore_trustroot_proto_rawDesc +) + 
+func file_sigstore_trustroot_proto_rawDescGZIP() []byte { + file_sigstore_trustroot_proto_rawDescOnce.Do(func() { + file_sigstore_trustroot_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_trustroot_proto_rawDescData) + }) + return file_sigstore_trustroot_proto_rawDescData +} + +var file_sigstore_trustroot_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_trustroot_proto_goTypes = []any{ + (*TransparencyLogInstance)(nil), // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance + (*CertificateAuthority)(nil), // 1: dev.sigstore.trustroot.v1.CertificateAuthority + (*TrustedRoot)(nil), // 2: dev.sigstore.trustroot.v1.TrustedRoot + (*SigningConfig)(nil), // 3: dev.sigstore.trustroot.v1.SigningConfig + (*ClientTrustConfig)(nil), // 4: dev.sigstore.trustroot.v1.ClientTrustConfig + (v1.HashAlgorithm)(0), // 5: dev.sigstore.common.v1.HashAlgorithm + (*v1.PublicKey)(nil), // 6: dev.sigstore.common.v1.PublicKey + (*v1.LogId)(nil), // 7: dev.sigstore.common.v1.LogId + (*v1.DistinguishedName)(nil), // 8: dev.sigstore.common.v1.DistinguishedName + (*v1.X509CertificateChain)(nil), // 9: dev.sigstore.common.v1.X509CertificateChain + (*v1.TimeRange)(nil), // 10: dev.sigstore.common.v1.TimeRange +} +var file_sigstore_trustroot_proto_depIdxs = []int32{ + 5, // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance.hash_algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 6, // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance.public_key:type_name -> dev.sigstore.common.v1.PublicKey + 7, // 2: dev.sigstore.trustroot.v1.TransparencyLogInstance.log_id:type_name -> dev.sigstore.common.v1.LogId + 7, // 3: dev.sigstore.trustroot.v1.TransparencyLogInstance.checkpoint_key_id:type_name -> dev.sigstore.common.v1.LogId + 8, // 4: dev.sigstore.trustroot.v1.CertificateAuthority.subject:type_name -> dev.sigstore.common.v1.DistinguishedName + 9, // 5: dev.sigstore.trustroot.v1.CertificateAuthority.cert_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 10, // 6: dev.sigstore.trustroot.v1.CertificateAuthority.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 0, // 7: dev.sigstore.trustroot.v1.TrustedRoot.tlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 1, // 8: dev.sigstore.trustroot.v1.TrustedRoot.certificate_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 0, // 9: dev.sigstore.trustroot.v1.TrustedRoot.ctlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 1, // 10: dev.sigstore.trustroot.v1.TrustedRoot.timestamp_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 2, // 11: dev.sigstore.trustroot.v1.ClientTrustConfig.trusted_root:type_name -> dev.sigstore.trustroot.v1.TrustedRoot + 3, // 12: dev.sigstore.trustroot.v1.ClientTrustConfig.signing_config:type_name -> dev.sigstore.trustroot.v1.SigningConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_sigstore_trustroot_proto_init() } +func file_sigstore_trustroot_proto_init() { + if File_sigstore_trustroot_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_trustroot_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + 
NumServices:   0,
+		},
+		GoTypes:           file_sigstore_trustroot_proto_goTypes,
+		DependencyIndexes: file_sigstore_trustroot_proto_depIdxs,
+		MessageInfos:      file_sigstore_trustroot_proto_msgTypes,
+	}.Build()
+	File_sigstore_trustroot_proto = out.File
+	file_sigstore_trustroot_proto_rawDesc = nil
+	file_sigstore_trustroot_proto_goTypes = nil
+	file_sigstore_trustroot_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
index 0113fcad..bdff0276 100644
--- a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
+++ b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
@@ -109,6 +109,12 @@ to github):
     git push origin your-branch --force
 
 Alternatively, a core member can squash your commits within Github.
+
+## DCO Signoff
+
+Make sure to sign the [Developer Certificate of
+Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
+
 ## Code of Conduct
 
 Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.
diff --git a/vendor/github.com/sigstore/rekor/pkg/client/options.go b/vendor/github.com/sigstore/rekor/pkg/client/options.go
index f42b6598..c1135a71 100644
--- a/vendor/github.com/sigstore/rekor/pkg/client/options.go
+++ b/vendor/github.com/sigstore/rekor/pkg/client/options.go
@@ -16,6 +16,7 @@ package client
 
 import (
 	"net/http"
+	"time"
 
 	"github.com/hashicorp/go-retryablehttp"
 )
@@ -24,10 +25,14 @@ import (
 type Option func(*options)
 
 type options struct {
-	UserAgent   string
-	RetryCount  uint
-	InsecureTLS bool
-	Logger      interface{}
+	UserAgent           string
+	RetryCount          uint
+	RetryWaitMin        time.Duration
+	RetryWaitMax        time.Duration
+	InsecureTLS         bool
+	Logger              interface{}
+	NoDisableKeepalives bool
+	Headers             map[string][]string
 }
 
 const (
@@ -62,6 +67,20 @@ func WithRetryCount(retryCount uint) Option {
 	}
 }
 
+// WithRetryWaitMin sets the minimum length of time to wait between retries.
+func WithRetryWaitMin(t time.Duration) Option {
+	return func(o *options) {
+		o.RetryWaitMin = t
+	}
+}
+
+// WithRetryWaitMax sets the maximum length of time to wait between retries.
+func WithRetryWaitMax(t time.Duration) Option {
+	return func(o *options) {
+		o.RetryWaitMax = t
+	}
+}
+
 // WithLogger sets the logger; it must implement either retryablehttp.Logger or retryablehttp.LeveledLogger; if not, this will not take effect.
 func WithLogger(logger interface{}) Option {
 	return func(o *options) {
@@ -72,20 +91,41 @@ func WithLogger(logger interface{}) Option {
 	}
 }
 
+// WithInsecureTLS disables TLS verification.
 func WithInsecureTLS(enabled bool) Option {
 	return func(o *options) {
 		o.InsecureTLS = enabled
 	}
 }
 
+// WithNoDisableKeepalives unsets the default DisableKeepalives setting.
+func WithNoDisableKeepalives(noDisableKeepalives bool) Option {
+	return func(o *options) {
+		o.NoDisableKeepalives = noDisableKeepalives
+	}
+}
+
+// WithHeaders sets default headers for every client request.
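+//
+// An illustrative combination of these options (editor's sketch; the server
+// URL and header values are hypothetical):
+//
+//	c, err := client.GetRekorClient("https://rekor.example.dev",
+//		client.WithRetryCount(3),
+//		client.WithRetryWaitMin(1*time.Second),
+//		client.WithRetryWaitMax(10*time.Second),
+//		client.WithHeaders(map[string][]string{"X-Team": {"supply-chain"}}),
+//	)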
+func WithHeaders(h map[string][]string) Option { + return func(o *options) { + o.Headers = h + } +} + type roundTripper struct { http.RoundTripper UserAgent string + Headers map[string][]string } // RoundTrip implements `http.RoundTripper` func (rt *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { req.Header.Set("User-Agent", rt.UserAgent) + for k, v := range rt.Headers { + for _, h := range v { + req.Header.Add(k, h) + } + } return rt.RoundTripper.RoundTrip(req) } @@ -93,12 +133,13 @@ func createRoundTripper(inner http.RoundTripper, o *options) http.RoundTripper { if inner == nil { inner = http.DefaultTransport } - if o.UserAgent == "" { + if o.UserAgent == "" && o.Headers == nil { // There's nothing to do... return inner } return &roundTripper{ RoundTripper: inner, UserAgent: o.UserAgent, + Headers: o.Headers, } } diff --git a/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go index 601dd9d3..470ca5ea 100644 --- a/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go @@ -22,6 +22,7 @@ import ( "github.com/go-openapi/runtime" httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" + "github.com/hashicorp/go-cleanhttp" retryablehttp "github.com/hashicorp/go-retryablehttp" "github.com/sigstore/rekor/pkg/generated/client" @@ -37,6 +38,9 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error retryableClient := retryablehttp.NewClient() defaultTransport := cleanhttp.DefaultTransport() + if o.NoDisableKeepalives { + defaultTransport.DisableKeepAlives = false + } if o.InsecureTLS { /* #nosec G402 */ defaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} @@ -45,6 +49,8 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error Transport: defaultTransport, } retryableClient.RetryMax = int(o.RetryCount) + retryableClient.RetryWaitMin = o.RetryWaitMin + retryableClient.RetryWaitMax = o.RetryWaitMax retryableClient.Logger = o.Logger httpClient := retryableClient.StandardClient() diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go index 68ddd71e..9b2845e7 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go @@ -22,6 +22,7 @@ package entries // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -126,11 +127,13 @@ func (o *CreateLogEntryCreated) Code() int { } func (o *CreateLogEntryCreated) Error() string { - return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) } func (o *CreateLogEntryCreated) String() string { - return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) } func (o *CreateLogEntryCreated) GetPayload() models.LogEntry { @@ -210,11 +213,13 @@ func (o *CreateLogEntryBadRequest) Code() int { } func (o *CreateLogEntryBadRequest) 
Error() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
 }
 
 func (o *CreateLogEntryBadRequest) String() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
 }
 
 func (o *CreateLogEntryBadRequest) GetPayload() *models.Error {
@@ -280,11 +285,13 @@ func (o *CreateLogEntryConflict) Code() int {
 }
 
 func (o *CreateLogEntryConflict) Error() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
 }
 
 func (o *CreateLogEntryConflict) String() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
 }
 
 func (o *CreateLogEntryConflict) GetPayload() *models.Error {
@@ -363,11 +370,13 @@ func (o *CreateLogEntryDefault) Code() int {
 }
 
 func (o *CreateLogEntryDefault) Error() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
 }
 
 func (o *CreateLogEntryDefault) String() string {
-	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
 }
 
 func (o *CreateLogEntryDefault) GetPayload() *models.Error {
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
index fe2630ea..259b38ee 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
@@ -23,6 +23,7 @@ package entries
 
 import (
 	"github.com/go-openapi/runtime"
+	httptransport "github.com/go-openapi/runtime/client"
 	"github.com/go-openapi/strfmt"
 )
 
@@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
 	return &Client{transport: transport, formats: formats}
 }
 
+// NewClientWithBasicAuth creates a new entries API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+	transport := httptransport.New(host, basePath, []string{scheme})
+	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+	return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new entries API client with a bearer token for authentication.
+// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for entries API */ @@ -39,7 +65,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go index 52f0b85f..4268f756 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go @@ -22,6 +22,7 @@ package entries // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -108,11 +109,13 @@ func (o *GetLogEntryByIndexOK) Code() int { } func (o *GetLogEntryByIndexOK) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) } func (o *GetLogEntryByIndexOK) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) } func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry { @@ -173,11 +176,11 @@ func (o *GetLogEntryByIndexNotFound) Code() int { } func (o *GetLogEntryByIndexNotFound) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) } func (o *GetLogEntryByIndexNotFound) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) } func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -234,11 +237,13 @@ func (o *GetLogEntryByIndexDefault) Code() int { } func (o *GetLogEntryByIndexDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) } func (o *GetLogEntryByIndexDefault) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) } func (o 
*GetLogEntryByIndexDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go index e33a3a42..df6ede58 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go @@ -22,6 +22,7 @@ package entries // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -108,11 +109,13 @@ func (o *GetLogEntryByUUIDOK) Code() int { } func (o *GetLogEntryByUUIDOK) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) } func (o *GetLogEntryByUUIDOK) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) } func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry { @@ -173,11 +176,11 @@ func (o *GetLogEntryByUUIDNotFound) Code() int { } func (o *GetLogEntryByUUIDNotFound) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) } func (o *GetLogEntryByUUIDNotFound) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) } func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -234,11 +237,13 @@ func (o *GetLogEntryByUUIDDefault) Code() int { } func (o *GetLogEntryByUUIDDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) } func (o *GetLogEntryByUUIDDefault) String() string { - return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) } func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go index 5be4e3d2..e064bcde 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go @@ -22,6 +22,7 @@ package entries // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -114,11 +115,13 @@ func (o *SearchLogQueryOK) Code() int { 
} func (o *SearchLogQueryOK) Error() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) } func (o *SearchLogQueryOK) String() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) } func (o *SearchLogQueryOK) GetPayload() []models.LogEntry { @@ -180,11 +183,13 @@ func (o *SearchLogQueryBadRequest) Code() int { } func (o *SearchLogQueryBadRequest) Error() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) } func (o *SearchLogQueryBadRequest) String() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) } func (o *SearchLogQueryBadRequest) GetPayload() *models.Error { @@ -248,11 +253,13 @@ func (o *SearchLogQueryUnprocessableEntity) Code() int { } func (o *SearchLogQueryUnprocessableEntity) Error() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) } func (o *SearchLogQueryUnprocessableEntity) String() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) } func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error { @@ -320,11 +327,13 @@ func (o *SearchLogQueryDefault) Code() int { } func (o *SearchLogQueryDefault) Error() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) } func (o *SearchLogQueryDefault) String() string { - return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) } func (o *SearchLogQueryDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go index cf5a83ca..e5262055 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go @@ -23,6 +23,7 @@ package index import ( "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -31,6 +32,31 @@ func New(transport 
runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new index API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new index API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for index API */ @@ -39,7 +65,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go index c9205a15..0fa2a34d 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go @@ -22,6 +22,7 @@ package index // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -108,11 +109,13 @@ func (o *SearchIndexOK) Code() int { } func (o *SearchIndexOK) Error() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) } func (o *SearchIndexOK) String() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) } func (o *SearchIndexOK) GetPayload() []string { @@ -174,11 +177,13 @@ func (o *SearchIndexBadRequest) Code() int { } func (o *SearchIndexBadRequest) Error() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload) } func (o *SearchIndexBadRequest) String() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest 
%s", 400, payload) } func (o *SearchIndexBadRequest) GetPayload() *models.Error { @@ -246,11 +251,13 @@ func (o *SearchIndexDefault) Code() int { } func (o *SearchIndexDefault) Error() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) } func (o *SearchIndexDefault) String() string { - return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) } func (o *SearchIndexDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go index 9b33f6db..c9e33d7b 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go @@ -22,6 +22,7 @@ package pubkey // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -102,11 +103,13 @@ func (o *GetPublicKeyOK) Code() int { } func (o *GetPublicKeyOK) Error() string { - return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) } func (o *GetPublicKeyOK) String() string { - return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) } func (o *GetPublicKeyOK) GetPayload() string { @@ -172,11 +175,13 @@ func (o *GetPublicKeyDefault) Code() int { } func (o *GetPublicKeyDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) } func (o *GetPublicKeyDefault) String() string { - return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) } func (o *GetPublicKeyDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go index 714d2de2..64b52226 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go @@ -23,6 +23,7 @@ package pubkey import ( "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new pubkey API client with basic auth credentials. 
+// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new pubkey API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for pubkey API */ @@ -39,9 +65,33 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) +// This client is generated with a few options you might find useful for your swagger spec. +// +// Feel free to add your own set of options. + +// WithAccept allows the client to force the Accept header +// to negotiate a specific Producer from the server. +// +// You may use this option to set arbitrary extensions to your MIME media type. +func WithAccept(mime string) ClientOption { + return func(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{mime} + } +} + +// WithAcceptApplicationJSON sets the Accept header to "application/json". +func WithAcceptApplicationJSON(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/json"} +} + +// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file".
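+// +// Illustrative use of these Accept options (an editorial sketch, not part of the upstream diff; it assumes a pubkey ClientService value `c` built with one of the constructors above): +// +// params := NewGetPublicKeyParams() +// resp, err := c.GetPublicKey(params, WithAcceptApplicationxPemFile) +// if err != nil { +// // handle transport or API error +// } +// pem := resp.GetPayload() // the log's public key as a PEM string +//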
+func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/x-pem-file"} +} + // ClientService is the interface for Client methods type ClientService interface { GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go index a010a72f..a43ac752 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go @@ -22,6 +22,7 @@ package tlog // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -102,11 +103,13 @@ func (o *GetLogInfoOK) Code() int { } func (o *GetLogInfoOK) Error() string { - return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) } func (o *GetLogInfoOK) String() string { - return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) } func (o *GetLogInfoOK) GetPayload() *models.LogInfo { @@ -174,11 +177,13 @@ func (o *GetLogInfoDefault) Code() int { } func (o *GetLogInfoDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) } func (o *GetLogInfoDefault) String() string { - return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) } func (o *GetLogInfoDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go index f0cf747c..9dc9d585 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go @@ -22,6 +22,7 @@ package tlog // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -108,11 +109,13 @@ func (o *GetLogProofOK) Code() int { } func (o *GetLogProofOK) Error() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) } func (o *GetLogProofOK) String() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) } func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof { @@ -176,11 +179,13 @@ func (o *GetLogProofBadRequest) Code() int { } func (o *GetLogProofBadRequest) Error() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) 
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) } func (o *GetLogProofBadRequest) String() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) } func (o *GetLogProofBadRequest) GetPayload() *models.Error { @@ -248,11 +253,13 @@ func (o *GetLogProofDefault) Code() int { } func (o *GetLogProofDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) } func (o *GetLogProofDefault) String() string { - return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) } func (o *GetLogProofDefault) GetPayload() *models.Error { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go index f53f6c7f..9befb5c9 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go @@ -23,6 +23,7 @@ package tlog import ( "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new tlog API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new tlog API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for tlog API */ @@ -39,7 +65,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. 
type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go index 250a6125..a239c84f 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go @@ -294,7 +294,7 @@ type AlpineV001SchemaPackageHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the package diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go index 93dce871..5818dca1 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -307,7 +307,7 @@ type CoseV001SchemaDataEnvelopeHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the envelope @@ -417,7 +417,7 @@ type CoseV001SchemaDataPayloadHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the content diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go index ec4c32bf..5fde2a77 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go @@ -312,7 +312,7 @@ type DSSEV001SchemaEnvelopeHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The value of the computed digest over the entire envelope @@ -422,7 +422,7 @@ type DSSEV001SchemaPayloadHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The value of the computed digest over the payload within the envelope diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go index 387a9392..56034a57 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go @@ -21,9 +21,9 @@ package models // This file was generated by the swagger tool. 
// Editing this file might prove futile when you re-run the swagger generate command -// HashedrekordSchema Rekor Schema +// HashedrekordSchema Hashedrekord Schema // -// # Schema for Rekord objects +// # Schema for Hashedrekord objects // // swagger:model hashedrekordSchema type HashedrekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go index 3b906ae2..586025c5 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -277,7 +277,7 @@ type HashedrekordV001SchemaDataHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256 sha384 sha512] + // Enum: ["sha256","sha384","sha512"] Algorithm *string `json:"algorithm"` // The hash value for the content, as represented by a lower case hexadecimal string diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go index 930efc87..13c00597 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go @@ -326,7 +326,7 @@ type HelmV001SchemaChartHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the chart diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go index 0c299b1c..6973c729 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go @@ -300,7 +300,7 @@ type IntotoV001SchemaContentHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the archive @@ -410,7 +410,7 @@ type IntotoV001SchemaContentPayloadHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the envelope's payload diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go index c2c08ea5..309073a1 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go @@ -543,7 +543,7 @@ type IntotoV002SchemaContentHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the archive @@ -653,7 +653,7 @@ type IntotoV002SchemaContentPayloadHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value of the payload diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go index 
4564964a..2d741f3c 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go @@ -283,7 +283,7 @@ type JarV001SchemaArchiveHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the archive diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go index 9a525717..aaaad9d7 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go @@ -281,7 +281,7 @@ type RekordV001SchemaDataHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the content @@ -396,7 +396,7 @@ type RekordV001SchemaSignature struct { // Specifies the format of the signature // Required: true - // Enum: [pgp minisign x509 ssh] + // Enum: ["pgp","minisign","x509","ssh"] Format *string `json:"format"` // public key diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go index 80dadde7..394eece4 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go @@ -294,7 +294,7 @@ type RpmV001SchemaPackageHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: ["sha256"] Algorithm *string `json:"algorithm"` // The hash value for the package diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go index bb1ccccc..0f66abb5 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go @@ -45,7 +45,7 @@ type SearchIndex struct { Hash string `json:"hash,omitempty"` // operator - // Enum: [and or] + // Enum: ["and","or"] Operator string `json:"operator,omitempty"` // public key @@ -227,7 +227,7 @@ type SearchIndexPublicKey struct { // format // Required: true - // Enum: [pgp x509 minisign ssh tuf] + // Enum: ["pgp","x509","minisign","ssh","tuf"] Format *string `json:"format"` // url diff --git a/vendor/github.com/sigstore/rekor/pkg/tle/tle.go b/vendor/github.com/sigstore/rekor/pkg/tle/tle.go new file mode 100644 index 00000000..2e51f5bc --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/tle/tle.go @@ -0,0 +1,106 @@ +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
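+// +// Illustrative use of this package (an editorial sketch, not part of the upstream diff; `anon` stands for a models.LogEntryAnon previously fetched from Rekor): +// +// pb, err := GenerateTransparencyLogEntry(anon) +// if err != nil { +// // the entry could not be converted +// } +// jsonBytes, err := MarshalTLEToJSON(pb) +//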
+ +package tle + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "fmt" + + "github.com/go-openapi/runtime" + rekor_pb_common "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + rekor_pb "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" + "google.golang.org/protobuf/encoding/protojson" +) + +// GenerateTransparencyLogEntry returns a sigstore/protobuf-specs compliant message containing a +// TransparencyLogEntry as defined at https://github.com/sigstore/protobuf-specs/blob/main/protos/sigstore_rekor.proto +func GenerateTransparencyLogEntry(anon models.LogEntryAnon) (*rekor_pb.TransparencyLogEntry, error) { + logIDHash, err := hex.DecodeString(*anon.LogID) + if err != nil { + return nil, fmt.Errorf("decoding logID string: %w", err) + } + + rootHash, err := hex.DecodeString(*anon.Verification.InclusionProof.RootHash) + if err != nil { + return nil, fmt.Errorf("decoding inclusion proof root hash: %w", err) + } + + inclusionProofHashes := make([][]byte, len(anon.Verification.InclusionProof.Hashes)) + for i, hash := range anon.Verification.InclusionProof.Hashes { + hashBytes, err := hex.DecodeString(hash) + if err != nil { + return nil, fmt.Errorf("decoding inclusion proof hash: %w", err) + } + inclusionProofHashes[i] = hashBytes + } + + // Different call paths may supply string or []byte. If string, it is base64 encoded. + var body []byte + switch v := anon.Body.(type) { + case string: + b, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, fmt.Errorf("base64 decoding body: %w", err) + } + body = b + case []byte: + body = v + default: + return nil, fmt.Errorf("body is not string or []byte: (%T)%v", v, v) + } + + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + eimpl, err := types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + + return &rekor_pb.TransparencyLogEntry{ + LogIndex: *anon.LogIndex, // the global log index + LogId: &rekor_pb_common.LogId{ + KeyId: logIDHash, + }, + KindVersion: &rekor_pb.KindVersion{ + Kind: pe.Kind(), + Version: eimpl.APIVersion(), + }, + IntegratedTime: *anon.IntegratedTime, + InclusionPromise: &rekor_pb.InclusionPromise{ + SignedEntryTimestamp: anon.Verification.SignedEntryTimestamp, + }, + InclusionProof: &rekor_pb.InclusionProof{ + LogIndex: *anon.Verification.InclusionProof.LogIndex, // relative to the specific tree the entry is found in + RootHash: rootHash, + TreeSize: *anon.Verification.InclusionProof.TreeSize, + Hashes: inclusionProofHashes, + Checkpoint: &rekor_pb.Checkpoint{ + Envelope: *anon.Verification.InclusionProof.Checkpoint, + }, + }, + CanonicalizedBody: body, // we don't call eimpl.Canonicalize in the case that the logic is different in this caller vs when it was persisted in the log + }, nil +} + +// MarshalTLEToJSON marshals a TransparencyLogEntry message to JSON according to the protobuf JSON encoding rules +func MarshalTLEToJSON(tle *rekor_pb.TransparencyLogEntry) ([]byte, error) { + return protojson.Marshal(tle) +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/error.go b/vendor/github.com/sigstore/rekor/pkg/types/error.go index 01b8c14d..57f0c775 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/error.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/error.go @@ -17,4 +17,15 @@ package types // ValidationError indicates that there is an issue with the content in the HTTP Request that // should 
result in an HTTP 400 Bad Request error being returned to the client +// +// Deprecated: use InputValidationError instead to take advantage of Go's error wrapping type ValidationError error + +// InputValidationError indicates that there is an issue with the content in the HTTP Request that +// should result in an HTTP 400 Bad Request error being returned to the client +type InputValidationError struct { + Err error +} + +func (v *InputValidationError) Error() string { return v.Err.Error() } +func (v *InputValidationError) Unwrap() error { return v.Err } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json index e6104f4c..be9beaeb 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json @@ -1,8 +1,8 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "$id": "http://rekor.sigstore.dev/types/hashedrekord/hasehedrekord_schema.json", - "title": "Rekor Schema", - "description": "Schema for Rekord objects", + "title": "Hashedrekord Schema", + "description": "Schema for Hashedrekord objects", "type": "object", "oneOf": [ { diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go index 74b268ef..b84c1641 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go @@ -108,7 +108,7 @@ func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { sigObj, keyObj, err := v.validate() if err != nil { - return nil, types.ValidationError(err) + return nil, &types.InputValidationError{Err: err} } canonicalEntry := models.HashedrekordV001Schema{} @@ -144,34 +144,34 @@ func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { func (v *V001Entry) validate() (pki.Signature, pki.PublicKey, error) { sig := v.HashedRekordObj.Signature if sig == nil { - return nil, nil, types.ValidationError(errors.New("missing signature")) + return nil, nil, &types.InputValidationError{Err: errors.New("missing signature")} } // Hashed rekord type only works for x509 signature types sigObj, err := x509.NewSignatureWithOpts(bytes.NewReader(sig.Content), options.WithED25519ph()) if err != nil { - return nil, nil, types.ValidationError(err) + return nil, nil, &types.InputValidationError{Err: err} } key := sig.PublicKey if key == nil { - return nil, nil, types.ValidationError(errors.New("missing public key")) + return nil, nil, &types.InputValidationError{Err: errors.New("missing public key")} } keyObj, err := x509.NewPublicKey(bytes.NewReader(key.Content)) if err != nil { - return nil, nil, types.ValidationError(err) + return nil, nil, &types.InputValidationError{Err: err} } data := v.HashedRekordObj.Data if data == nil { - return nil, nil, types.ValidationError(errors.New("missing data")) + return nil, nil, &types.InputValidationError{Err: errors.New("missing data")} } hash := data.Hash if hash == nil { - return nil, nil, types.ValidationError(errors.New("missing hash")) + return nil, nil, &types.InputValidationError{Err: errors.New("missing hash")} } if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) { - return nil, nil, types.ValidationError(errors.New("invalid 
value for hash")) + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} } var alg crypto.Hash @@ -189,7 +189,7 @@ func (v *V001Entry) validate() (pki.Signature, pki.PublicKey, error) { return nil, nil, err } if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded), options.WithCryptoSignerOpts(alg)); err != nil { - return nil, nil, types.ValidationError(fmt.Errorf("verifying signature: %w", err)) + return nil, nil, &types.InputValidationError{Err: fmt.Errorf("verifying signature: %w", err)} } return sigObj, keyObj, nil diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json index 576071ed..3d536eb4 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://rekor.sigstore.dev/types/rekord/rekord_v0_0_1_schema.json", + "$id": "http://rekor.sigstore.dev/types/rekord/hashedrekord_v0_0_1_schema.json", "title": "Hashed Rekor v0.0.1 Schema", "description": "Schema for Hashed Rekord object", "type": "object", @@ -47,7 +47,7 @@ }, "required": [ "algorithm", "value" ] } - } + } } }, "required": [ "signature", "data" ] diff --git a/vendor/github.com/sigstore/rekor/pkg/types/rekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/rekord/v0.0.1/entry.go index 2c6412ce..18f47512 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/rekord/v0.0.1/entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/rekord/v0.0.1/entry.go @@ -162,7 +162,7 @@ func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, p computedSHA := hex.EncodeToString(hasher.Sum(nil)) if oldSHA != "" && computedSHA != oldSHA { - return closePipesOnError(types.ValidationError(fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA))) + return closePipesOnError(&types.InputValidationError{Err: fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA)}) } select { @@ -182,7 +182,7 @@ func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, p signature, err := af.NewSignature(sigReadCloser) if err != nil { - return closePipesOnError(types.ValidationError(err)) + return closePipesOnError(&types.InputValidationError{Err: err}) } select { @@ -202,7 +202,7 @@ func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, p key, err := af.NewPublicKey(keyReadCloser) if err != nil { - return closePipesOnError(types.ValidationError(err)) + return closePipesOnError(&types.InputValidationError{Err: err}) } select { @@ -226,7 +226,7 @@ func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, p var err error if err = sigObj.Verify(sigR, keyObj); err != nil { - return closePipesOnError(types.ValidationError(err)) + return closePipesOnError(&types.InputValidationError{Err: err}) } select { diff --git a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go index 40e1f385..4c9c8f8a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go @@ -18,6 +18,7 @@ package util import ( "bufio" "bytes" + "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -53,16 +54,14 @@ func (s *SignedNote) 
Sign(identity string, signer signature.Signer, opts signatu if err != nil { return nil, fmt.Errorf("retrieving public key: %w", err) } - pubKeyBytes, err := x509.MarshalPKIXPublicKey(pk) + pkHash, err := getPublicKeyHash(pk) if err != nil { - return nil, fmt.Errorf("marshalling public key: %w", err) + return nil, err } - pkSha := sha256.Sum256(pubKeyBytes) - signature := note.Signature{ Name: identity, - Hash: binary.BigEndian.Uint32(pkSha[:]), + Hash: pkHash, Base64: base64.StdEncoding.EncodeToString(sig), } @@ -80,15 +79,25 @@ func (s SignedNote) Verify(verifier signature.Verifier) bool { msg := []byte(s.Note) digest := sha256.Sum256(msg) + pk, err := verifier.PublicKey() + if err != nil { + return false + } + verifierPkHash, err := getPublicKeyHash(pk) + if err != nil { + return false + } + for _, s := range s.Signatures { sigBytes, err := base64.StdEncoding.DecodeString(s.Base64) if err != nil { return false } - pk, err := verifier.PublicKey() - if err != nil { + + if s.Hash != verifierPkHash { return false } + opts := []signature.VerifyOption{} switch pk.(type) { case *rsa.PublicKey, *ecdsa.PublicKey: @@ -190,3 +199,13 @@ func SignedNoteValidator(strToValidate string) bool { s := SignedNote{} return s.UnmarshalText([]byte(strToValidate)) == nil } + +func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) { + pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return 0, fmt.Errorf("marshalling public key: %w", err) + } + pkSha := sha256.Sum256(pubKeyBytes) + hash := binary.BigEndian.Uint32(pkSha[:]) + return hash, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go new file mode 100644 index 00000000..7b519b58 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go @@ -0,0 +1,234 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/sigstore/rekor/pkg/generated/client" + "github.com/sigstore/rekor/pkg/generated/client/tlog" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/util" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// ProveConsistency verifies consistency between an initial, trusted STH +// and a second new STH. Callers MUST verify signature on the STHs.
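+// +// For example (an editorial sketch, not part of the upstream diff; `rc` is a *client.Rekor, and both checkpoints have already had their signatures verified): +// +// if err := ProveConsistency(ctx, rc, trustedSTH, newSTH, treeID); err != nil { +// // the log is not consistent with the trusted checkpoint +// } +//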
+func ProveConsistency(ctx context.Context, rClient *client.Rekor, + oldSTH *util.SignedCheckpoint, newSTH *util.SignedCheckpoint, treeID string) error { + oldTreeSize := int64(oldSTH.Size) + switch { + case oldTreeSize == 0: + return errors.New("consistency proofs can not be computed starting from an empty log") + case oldTreeSize == int64(newSTH.Size): + if !bytes.Equal(oldSTH.Hash, newSTH.Hash) { + return errors.New("old root hash does not match STH hash") + } + case oldTreeSize < int64(newSTH.Size): + consistencyParams := tlog.NewGetLogProofParamsWithContext(ctx) + consistencyParams.FirstSize = &oldTreeSize // Root size at the old, or trusted state. + consistencyParams.LastSize = int64(newSTH.Size) // Root size at the new state to verify against. + consistencyParams.TreeID = &treeID + consistencyProof, err := rClient.Tlog.GetLogProof(consistencyParams) + if err != nil { + return err + } + var hashes [][]byte + for _, h := range consistencyProof.Payload.Hashes { + b, err := hex.DecodeString(h) + if err != nil { + return errors.New("error decoding consistency proof hashes") + } + hashes = append(hashes, b) + } + if err := proof.VerifyConsistency(rfc6962.DefaultHasher, + oldSTH.Size, newSTH.Size, hashes, oldSTH.Hash, newSTH.Hash); err != nil { + return err + } + case oldTreeSize > int64(newSTH.Size): + return errors.New("inclusion proof returned a tree size larger than the verified tree size") + } + return nil + +} + +// VerifyCurrentCheckpoint verifies the provided checkpoint by verifying consistency +// against a newly fetched Checkpoint. +// nolint +func VerifyCurrentCheckpoint(ctx context.Context, rClient *client.Rekor, verifier signature.Verifier, + oldSTH *util.SignedCheckpoint) (*util.SignedCheckpoint, error) { + // The oldSTH should already be verified, but check for robustness. + if !oldSTH.Verify(verifier) { + return nil, errors.New("signature on old tree head did not verify") + } + + // Get and verify against the current STH. + infoParams := tlog.NewGetLogInfoParamsWithContext(ctx) + result, err := rClient.Tlog.GetLogInfo(infoParams) + if err != nil { + return nil, err + } + + logInfo := result.GetPayload() + sth := util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*logInfo.SignedTreeHead)); err != nil { + return nil, err + } + + // Verify the signature on the SignedCheckpoint. + if !sth.Verify(verifier) { + return nil, errors.New("signature on tree head did not verify") + } + + // Now verify consistency up to the STH. + if err := ProveConsistency(ctx, rClient, oldSTH, &sth, *logInfo.TreeID); err != nil { + return nil, err + } + return &sth, nil +} + +// VerifyCheckpointSignature verifies the signature on a checkpoint (signed tree head). It does +// not verify consistency against other checkpoints. 
+// nolint +func VerifyCheckpointSignature(e *models.LogEntryAnon, verifier signature.Verifier) error { + sth := &util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*e.Verification.InclusionProof.Checkpoint)); err != nil { + return fmt.Errorf("unmarshalling log entry checkpoint to SignedCheckpoint: %w", err) + } + if !sth.Verify(verifier) { + return errors.New("signature on checkpoint did not verify") + } + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return errors.New("decoding inclusion proof root hash") + } + + if !bytes.EqualFold(rootHash, sth.Hash) { + return fmt.Errorf("proof root hash does not match signed tree head, expected %s got %s", + *e.Verification.InclusionProof.RootHash, + hex.EncodeToString(sth.Hash)) + } + return nil +} + +// VerifyInclusion verifies an entry's inclusion proof. Clients MUST either verify +// the root hash against a new STH (via VerifyCurrentCheckpoint) or against a +// trusted, existing STH (via ProveConsistency). +// nolint +func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error { + if e.Verification == nil || e.Verification.InclusionProof == nil { + return errors.New("inclusion proof not provided") + } + + hashes := [][]byte{} + for _, h := range e.Verification.InclusionProof.Hashes { + hb, _ := hex.DecodeString(h) + hashes = append(hashes, hb) + } + + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return err + } + + // Verify the inclusion proof. + entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string)) + if err != nil { + return err + } + leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes) + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex), + uint64(*e.Verification.InclusionProof.TreeSize), leafHash, hashes, rootHash); err != nil { + return err + } + + return nil +} + +// VerifySignedEntryTimestamp verifies the entry's SET against the provided +// public key. +// nolint +func VerifySignedEntryTimestamp(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + if e.Verification == nil { + return errors.New("missing verification") + } + if e.Verification.SignedEntryTimestamp == nil { + return errors.New("signature missing") + } + + type bundle struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + // Note that this is the virtual index. + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` + } + bundlePayload := bundle{ + Body: e.Body, + IntegratedTime: *e.IntegratedTime, + LogIndex: *e.LogIndex, + LogID: *e.LogID, + } + contents, err := json.Marshal(bundlePayload) + if err != nil { + return fmt.Errorf("marshaling bundle: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing bundle: %w", err) + } + + // verify the SET against the public key + if err := verifier.VerifySignature(bytes.NewReader(e.Verification.SignedEntryTimestamp), + bytes.NewReader(canonicalized), options.WithContext(ctx)); err != nil { + return fmt.Errorf("unable to verify bundle: %w", err) + } + return nil +} + +// VerifyLogEntry performs verification of a LogEntry given a Rekor verifier. +// Performs inclusion proof verification up to a verified root hash, +// SignedEntryTimestamp verification, and checkpoint verification.
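+// +// For example (an editorial sketch, not part of the upstream diff; the verifier wraps Rekor's public key, here assumed available as PEM bytes and loaded via sigstore's cryptoutils.UnmarshalPEMToPublicKey and signature.LoadVerifier): +// +// pub, _ := cryptoutils.UnmarshalPEMToPublicKey(pemBytes) +// verifier, _ := signature.LoadVerifier(pub, crypto.SHA256) +// if err := VerifyLogEntry(ctx, &entryAnon, verifier); err != nil { +// // inclusion, checkpoint, or SET verification failed +// } +//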
+// nolint +func VerifyLogEntry(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + // Verify the inclusion proof using the body's leaf hash. + if err := VerifyInclusion(ctx, e); err != nil { + return err + } + + // Verify checkpoint, which includes a signed root hash. + if err := VerifyCheckpointSignature(e, verifier); err != nil { + return err + } + + // Verify the Signed Entry Timestamp. + if err := VerifySignedEntryTimestamp(ctx, e, verifier); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt new file mode 100644 index 00000000..406ee260 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2023 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/sigstore/sigstore-go/LICENSE similarity index 93% rename from vendor/github.com/docker/docker/LICENSE rename to vendor/github.com/sigstore/sigstore-go/LICENSE index 6d8d58fb..261eeb9e 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/sigstore/sigstore-go/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go new file mode 100644 index 00000000..e9a0680d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go @@ -0,0 +1,239 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go , +// EXCEPT: +// - the parseExtensions func has been renamed ParseExtensions + +package certificate + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + // Deprecated: Use OIDIssuerV2 + OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1} + // Deprecated: Use OIDBuildTrigger + OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2} + // Deprecated: Use OIDSourceRepositoryDigest + OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3} + // Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest + OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4} + // Deprecated: Use SourceRepositoryURI + OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5} + // Deprecated: Use OIDSourceRepositoryRef + OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6} + + OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} + OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8} + + // CI extensions + OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9} + OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10} + OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11} + OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12} + OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13} + OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14} + OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15} + OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16} + OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17} + OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18} + OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19} + OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20} + OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21} + OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22} +) + +// Extensions contains all custom x509 extensions defined by Fulcio +type Extensions struct { + // NB: New extensions must be added here and documented + // at docs/oidc-info.md + + // The OIDC issuer. Should match `iss` claim of ID token or, in the case of + // a federated login like Dex it should match the issuer URL of the + // upstream issuer. The issuer is not set the extensions are invalid and + // will fail to render. + Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated) + + // Deprecated + // Triggering event of the Github Workflow. 
Matches the `event_name` claim of ID + // tokens from Github Actions + GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2 + + // Deprecated + // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID + // tokens from Github Actions + GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3 + + // Deprecated + // Name of Github Actions Workflow. Matches the `workflow` claim of the ID + // tokens from Github Actions + GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 + + // Deprecated + // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID + // tokens from Github Actions + GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 + + // Deprecated + // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens + // from Github Actions + GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6 + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. 
+ RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. + SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} + +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { + out := Extensions{} + + for _, e := range ext { + switch { + // BEGIN: Deprecated + case e.Id.Equal(OIDIssuer): + out.Issuer = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowTrigger): + out.GithubWorkflowTrigger = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowSHA): + out.GithubWorkflowSHA = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowName): + out.GithubWorkflowName = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRepository): + out.GithubWorkflowRepository = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRef): + out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := ParseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } + } + } + + // We only ever return nil, but leaving error in place so that we can add + // more complex parsing of fields in a backwards compatible way if needed. + return out, nil +} + +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. 
+// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. +func ParseDERString(val []byte, parsedVal *string) error { + rest, err := asn1.Unmarshal(val, parsedVal) + if err != nil { + return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err) + } + if len(rest) != 0 { + return errors.New("unexpected trailing bytes in DER-encoded string") + } + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go new file mode 100644 index 00000000..78b7fee0 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go @@ -0,0 +1,90 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certificate + +import ( + "crypto/x509" + "errors" + "fmt" + "reflect" + + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type Summary struct { + CertificateIssuer string `json:"certificateIssuer"` + SubjectAlternativeName string `json:"subjectAlternativeName"` + Extensions +} + +type ErrCompareExtensions struct { + field string + expected string + actual string +} + +func (e *ErrCompareExtensions) Error() string { + return fmt.Sprintf("expected %s to be \"%s\", got \"%s\"", e.field, e.expected, e.actual) +} + +func SummarizeCertificate(cert *x509.Certificate) (Summary, error) { + extensions, err := ParseExtensions(cert.Extensions) + + if err != nil { + return Summary{}, err + } + + var san string + + switch { + case len(cert.URIs) > 0: + san = cert.URIs[0].String() + case len(cert.EmailAddresses) > 0: + san = cert.EmailAddresses[0] + } + if san == "" { + san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions) + } + if san == "" { + return Summary{}, errors.New("No Subject Alternative Name found") + } + + return Summary{CertificateIssuer: cert.Issuer.String(), SubjectAlternativeName: san, Extensions: extensions}, nil +} + +// CompareExtensions compares two Extensions structs and returns an error if +// any set values in the expected struct are not equal. Empty fields in the +// expectedExt struct are ignored.
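ParseExtensions pairs naturally with CompareExtensions below for provenance policy checks. A sketch, with placeholder expected values (unset expected fields are ignored):

package example

import (
	"crypto/x509"

	"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
)

// checkProvenance verifies that a Fulcio certificate was issued for the
// expected OIDC issuer and source repository.
func checkProvenance(cert *x509.Certificate) error {
	actual, err := certificate.ParseExtensions(cert.Extensions)
	if err != nil {
		return err
	}
	expected := certificate.Extensions{
		Issuer:              "https://token.actions.githubusercontent.com",
		SourceRepositoryURI: "https://github.com/example/repo", // placeholder
	}
	return certificate.CompareExtensions(expected, actual)
}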
+func CompareExtensions(expectedExt, actualExt Extensions) error { + expExtValue := reflect.ValueOf(expectedExt) + actExtValue := reflect.ValueOf(actualExt) + + fields := reflect.VisibleFields(expExtValue.Type()) + for _, field := range fields { + expectedFieldVal := expExtValue.FieldByName(field.Name) + + // if the expected field is empty, skip it + if expectedFieldVal.IsValid() && !expectedFieldVal.IsZero() { + actualFieldVal := actExtValue.FieldByName(field.Name) + if actualFieldVal.IsValid() { + if expectedFieldVal.Interface() != actualFieldVal.Interface() { + return &ErrCompareExtensions{field.Name, fmt.Sprintf("%v", expectedFieldVal.Interface()), fmt.Sprintf("%v", actualFieldVal.Interface())} + } + } + } + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go new file mode 100644 index 00000000..5e1cb67c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go @@ -0,0 +1,66 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "crypto/x509" + "errors" + "time" +) + +type CertificateAuthority interface { + Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) +} + +type FulcioCertificateAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +var _ CertificateAuthority = &FulcioCertificateAuthority{} + +func (ca *FulcioCertificateAuthority) Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) { + if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) { + return nil, errors.New("certificate is not valid yet") + } + if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) { + return nil, errors.New("certificate is no longer valid") + } + + rootCertPool := x509.NewCertPool() + rootCertPool.AddCert(ca.Root) + intermediateCertPool := x509.NewCertPool() + for _, cert := range ca.Intermediates { + intermediateCertPool.AddCert(cert) + } + + // From spec: + // > ## Certificate + // > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)). 
+ + opts := x509.VerifyOptions{ + CurrentTime: observerTimestamp, + Roots: rootCertPool, + Intermediates: intermediateCertPool, + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageCodeSigning, + }, + } + + return cert.Verify(opts) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go new file mode 100644 index 00000000..f9a92953 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go @@ -0,0 +1,165 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "fmt" + "os" + + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" +) + +const SigningConfigMediaType01 = "application/vnd.dev.sigstore.signingconfig.v0.1+json" + +type SigningConfig struct { + signingConfig *prototrustroot.SigningConfig +} + +func (sc *SigningConfig) FulcioCertificateAuthorityURL() string { + return sc.signingConfig.GetCaUrl() +} + +func (sc *SigningConfig) OIDCProviderURL() string { + return sc.signingConfig.GetOidcUrl() +} + +func (sc *SigningConfig) RekorLogURLs() []string { + return sc.signingConfig.GetTlogUrls() +} + +func (sc *SigningConfig) TimestampAuthorityURLs() []string { + return sc.signingConfig.GetTsaUrls() +} + +func (sc *SigningConfig) WithFulcioCertificateAuthorityURL(fulcioURL string) *SigningConfig { + sc.signingConfig.CaUrl = fulcioURL + return sc +} + +func (sc *SigningConfig) WithOIDCProviderURL(oidcURL string) *SigningConfig { + sc.signingConfig.OidcUrl = oidcURL + return sc +} + +func (sc *SigningConfig) WithRekorLogURLs(logURLs []string) *SigningConfig { + sc.signingConfig.TlogUrls = logURLs + return sc +} + +func (sc *SigningConfig) AddRekorLogURLs(logURLs ...string) *SigningConfig { + sc.signingConfig.TlogUrls = append(sc.signingConfig.TlogUrls, logURLs...) + return sc +} + +func (sc *SigningConfig) WithTimestampAuthorityURLs(tsaURLs []string) *SigningConfig { + sc.signingConfig.TsaUrls = tsaURLs + return sc +} + +func (sc *SigningConfig) AddTimestampAuthorityURLs(tsaURLs ...string) *SigningConfig { + sc.signingConfig.TsaUrls = append(sc.signingConfig.TsaUrls, tsaURLs...) + return sc +} + +func (sc SigningConfig) String() string { + return fmt.Sprintf("{CA: %v, OIDC: %v, RekorLogs: %v, TSAs: %v, MediaType: %s}", + sc.FulcioCertificateAuthorityURL(), sc.OIDCProviderURL(), sc.RekorLogURLs(), sc.TimestampAuthorityURLs(), SigningConfigMediaType01) +} + +// NewSigningConfig initializes a SigningConfig object from a mediaType string, Fulcio certificate +// authority URL, OIDC provider URL, list of Rekor transparency log URLs, and a list of +// timestamp authorities.
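As a usage sketch of the constructor documented above (the URLs are the public-good Sigstore endpoints; the public-good instance relies on Rekor SETs rather than a timestamp authority, so that list is left empty):

package example

import "github.com/sigstore/sigstore-go/pkg/root"

func newPublicGoodSigningConfig() (*root.SigningConfig, error) {
	return root.NewSigningConfig(
		root.SigningConfigMediaType01,
		"https://fulcio.sigstore.dev",      // Fulcio certificate authority
		"https://oauth2.sigstore.dev/auth", // OIDC provider
		[]string{"https://rekor.sigstore.dev"},
		nil, // no timestamp authorities
	)
}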
+func NewSigningConfig(mediaType string, + fulcioCertificateAuthority string, + oidcProvider string, + rekorLogs []string, + timestampAuthorities []string) (*SigningConfig, error) { + if mediaType != SigningConfigMediaType01 { + return nil, fmt.Errorf("unsupported SigningConfig media type, must be: %s", SigningConfigMediaType01) + } + sc := &SigningConfig{ + signingConfig: &prototrustroot.SigningConfig{ + MediaType: mediaType, + CaUrl: fulcioCertificateAuthority, + OidcUrl: oidcProvider, + TlogUrls: rekorLogs, + TsaUrls: timestampAuthorities, + }, + } + return sc, nil +} + +// NewSigningConfigFromProtobuf returns a Sigstore signing configuration. +func NewSigningConfigFromProtobuf(sc *prototrustroot.SigningConfig) (*SigningConfig, error) { + if sc.GetMediaType() != SigningConfigMediaType01 { + return nil, fmt.Errorf("unsupported SigningConfig media type: %s", sc.GetMediaType()) + } + return &SigningConfig{signingConfig: sc}, nil +} + +// NewSigningConfigFromPath returns a Sigstore signing configuration from a file. +func NewSigningConfigFromPath(path string) (*SigningConfig, error) { + scJSON, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + return NewSigningConfigFromJSON(scJSON) +} + +// NewSigningConfigFromJSON returns a Sigstore signing configuration from JSON. +func NewSigningConfigFromJSON(rootJSON []byte) (*SigningConfig, error) { + pbSC, err := NewSigningConfigProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewSigningConfigFromProtobuf(pbSC) +} + +// NewSigningConfigProtobuf returns a Sigstore signing configuration as a protobuf. +func NewSigningConfigProtobuf(scJSON []byte) (*prototrustroot.SigningConfig, error) { + pbSC := &prototrustroot.SigningConfig{} + err := protojson.Unmarshal(scJSON, pbSC) + if err != nil { + return nil, err + } + return pbSC, nil +} + +// FetchSigningConfig fetches the public-good Sigstore signing configuration from TUF. +func FetchSigningConfig() (*SigningConfig, error) { + return FetchSigningConfigWithOptions(tuf.DefaultOptions()) +} + +// FetchSigningConfigWithOptions fetches the public-good Sigstore signing configuration with the given options from TUF. +func FetchSigningConfigWithOptions(opts *tuf.Options) (*SigningConfig, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, err + } + return GetSigningConfig(client) +} + +// GetSigningConfig fetches the public-good Sigstore signing configuration target from TUF. +func GetSigningConfig(c *tuf.Client) (*SigningConfig, error) { + jsonBytes, err := c.GetTarget("signing_config.json") + if err != nil { + return nil, err + } + return NewSigningConfigFromJSON(jsonBytes) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go new file mode 100644 index 00000000..07545bd4 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go @@ -0,0 +1,68 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "bytes" + "crypto/x509" + "errors" + "time" + + tsaverification "github.com/sigstore/timestamp-authority/pkg/verification" +) + +type Timestamp struct { + Time time.Time + URI string +} + +type TimestampingAuthority interface { + Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) +} + +type SigstoreTimestampingAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + Leaf *x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +var _ TimestampingAuthority = &SigstoreTimestampingAuthority{} + +func (tsa *SigstoreTimestampingAuthority) Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) { + trustedRootVerificationOptions := tsaverification.VerifyOpts{ + Roots: []*x509.Certificate{tsa.Root}, + Intermediates: tsa.Intermediates, + TSACertificate: tsa.Leaf, + } + + // Ensure timestamp responses are from trusted sources + timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(signatureBytes), trustedRootVerificationOptions) + if err != nil { + return nil, err + } + + if !tsa.ValidityPeriodStart.IsZero() && timestamp.Time.Before(tsa.ValidityPeriodStart) { + return nil, errors.New("timestamp is before the validity period start") + } + if !tsa.ValidityPeriodEnd.IsZero() && timestamp.Time.After(tsa.ValidityPeriodEnd) { + return nil, errors.New("timestamp is after the validity period end") + } + + // All above verification successful, so return nil + return &Timestamp{Time: timestamp.Time, URI: tsa.URI}, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go new file mode 100644 index 00000000..d1ec4d46 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
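The Verify method above is the whole TSA client surface; a caller sketch (tsr is assumed to be a DER-encoded RFC 3161 timestamp response and sig the signature bytes it is expected to cover):

package example

import (
	"fmt"

	"github.com/sigstore/sigstore-go/pkg/root"
)

// verifyTimestamp checks a signed timestamp against a configured TSA and
// reports the verified signing time.
func verifyTimestamp(tsa *root.SigstoreTimestampingAuthority, tsr, sig []byte) error {
	ts, err := tsa.Verify(tsr, sig)
	if err != nil {
		return err
	}
	fmt.Printf("timestamped at %s by %s\n", ts.Time, ts.URI)
	return nil
}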
+ +package root + +import ( + "fmt" + "time" + + "github.com/sigstore/sigstore/pkg/signature" +) + +type TrustedMaterial interface { + TimestampingAuthorities() []TimestampingAuthority + FulcioCertificateAuthorities() []CertificateAuthority + RekorLogs() map[string]*TransparencyLog + CTLogs() map[string]*TransparencyLog + PublicKeyVerifier(string) (TimeConstrainedVerifier, error) +} + +type BaseTrustedMaterial struct{} + +func (b *BaseTrustedMaterial) TimestampingAuthorities() []TimestampingAuthority { + return []TimestampingAuthority{} +} + +func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority { + return []CertificateAuthority{} +} + +func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) { + return nil, fmt.Errorf("public key verifier not found") +} + +type TrustedMaterialCollection []TrustedMaterial + +// Ensure types implement interfaces +var _ TrustedMaterial = &BaseTrustedMaterial{} +var _ TrustedMaterial = TrustedMaterialCollection{} + +func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + for _, tm := range tmc { + verifier, err := tm.PublicKeyVerifier(keyID) + if err == nil { + return verifier, nil + } + } + return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID) +} + +func (tmc TrustedMaterialCollection) TimestampingAuthorities() []TimestampingAuthority { + var timestampingAuthorities []TimestampingAuthority + for _, tm := range tmc { + timestampingAuthorities = append(timestampingAuthorities, tm.TimestampingAuthorities()...) + } + return timestampingAuthorities +} + +func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority { + var certAuthorities []CertificateAuthority + for _, tm := range tmc { + certAuthorities = append(certAuthorities, tm.FulcioCertificateAuthorities()...) + } + return certAuthorities +} + +func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.RekorLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.CTLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +type ValidityPeriodChecker interface { + ValidAtTime(time.Time) bool +} + +type TimeConstrainedVerifier interface { + ValidityPeriodChecker + signature.Verifier +} + +type TrustedPublicKeyMaterial struct { + BaseTrustedMaterial + publicKeyVerifier func(string) (TimeConstrainedVerifier, error) +} + +func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + return tr.publicKeyVerifier(keyID) +} + +func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial { + return &TrustedPublicKeyMaterial{ + publicKeyVerifier: publicKeyVerifier, + } +} + +// ExpiringKey is a TimeConstrainedVerifier with a static validity period. 
+type ExpiringKey struct { + signature.Verifier + validityPeriodStart time.Time + validityPeriodEnd time.Time +} + +var _ TimeConstrainedVerifier = &ExpiringKey{} + +// ValidAtTime returns true if the key is valid at the given time. If the +// validity period start time is not set, the key is considered valid for all +// times before the end time. Likewise, if the validity period end time is not +// set, the key is considered valid for all times after the start time. +func (k *ExpiringKey) ValidAtTime(t time.Time) bool { + if !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart) { + return false + } + if !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd) { + return false + } + return true +} + +// NewExpiringKey returns a new ExpiringKey with the given validity period +func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey { + return &ExpiringKey{ + Verifier: verifier, + validityPeriodStart: validityPeriodStart, + validityPeriodEnd: validityPeriodEnd, + } +} + +// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to +// ExpiringKeys. +func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial { + return NewTrustedPublicKeyMaterial(func(keyID string) (TimeConstrainedVerifier, error) { + expiringKey, ok := trustedPublicKeys[keyID] + if !ok { + return nil, fmt.Errorf("public key not found for keyID: %s", keyID) + } + return expiringKey, nil + }) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go new file mode 100644 index 00000000..c48c6084 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go @@ -0,0 +1,505 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
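The trusted_material.go helpers above compose into key-based trust material; a sketch wrapping one raw public key (SHA-256 as the signature hash is an assumption about the key type, and a zero end time means no expiry):

package example

import (
	"crypto"
	"time"

	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore/pkg/signature"
)

// keyTrust exposes a single public key, valid from `from` onward, as
// TrustedMaterial keyed by its key ID.
func keyTrust(keyID string, pub crypto.PublicKey, from time.Time) (*root.TrustedPublicKeyMaterial, error) {
	verifier, err := signature.LoadVerifier(pub, crypto.SHA256)
	if err != nil {
		return nil, err
	}
	keys := map[string]*root.ExpiringKey{
		keyID: root.NewExpiringKey(verifier, from, time.Time{}),
	}
	return root.NewTrustedPublicKeyMaterialFromMapping(keys), nil
}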
+ +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "fmt" + "log" + "os" + "sync" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" +) + +const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1" + +type TrustedRoot struct { + BaseTrustedMaterial + trustedRoot *prototrustroot.TrustedRoot + rekorLogs map[string]*TransparencyLog + certificateAuthorities []CertificateAuthority + ctLogs map[string]*TransparencyLog + timestampingAuthorities []TimestampingAuthority +} + +type TransparencyLog struct { + BaseURL string + ID []byte + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + // This is the hash algorithm used by the Merkle tree + HashFunc crypto.Hash + PublicKey crypto.PublicKey + // The hash algorithm used during signature creation + SignatureHashFunc crypto.Hash +} + +func (tr *TrustedRoot) TimestampingAuthorities() []TimestampingAuthority { + return tr.timestampingAuthorities +} + +func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + return tr.certificateAuthorities +} + +func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog { + return tr.rekorLogs +} + +func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog { + return tr.ctLogs +} + +func (tr *TrustedRoot) MarshalJSON() ([]byte, error) { + err := tr.constructProtoTrustRoot() + if err != nil { + return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err) + } + + return protojson.Marshal(tr.trustedRoot) +} + +func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) { + if protobufTrustedRoot.GetMediaType() != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", protobufTrustedRoot.GetMediaType()) + } + + trustedRoot = &TrustedRoot{trustedRoot: protobufTrustedRoot} + trustedRoot.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs()) + if err != nil { + return nil, err + } + + trustedRoot.certificateAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.timestampingAuthorities, err = ParseTimestampingAuthorities(protobufTrustedRoot.GetTimestampAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs()) + if err != nil { + return nil, err + } + + return trustedRoot, nil +} + +func ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) { + transparencyLogs = make(map[string]*TransparencyLog) + for _, tlog := range tlogs { + if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 { + return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm()) + } + if tlog.GetLogId() == nil { + return nil, fmt.Errorf("tlog missing log ID") + } + if tlog.GetLogId().GetKeyId() == nil { + return nil, fmt.Errorf("tlog missing log ID key ID") + } + encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId()) + + if tlog.GetPublicKey() == nil { + return nil, fmt.Errorf("tlog missing public key") + } + if tlog.GetPublicKey().GetRawBytes() == nil { + return nil, 
fmt.Errorf("tlog missing public key raw bytes") + } + + var hashFunc crypto.Hash + switch tlog.GetHashAlgorithm() { + case protocommon.HashAlgorithm_SHA2_256: + hashFunc = crypto.SHA256 + default: + return nil, fmt.Errorf("unsupported hash function for the tlog") + } + + tlogEntry := &TransparencyLog{ + BaseURL: tlog.GetBaseUrl(), + ID: tlog.GetLogId().GetKeyId(), + HashFunc: hashFunc, + SignatureHashFunc: crypto.SHA256, + } + + switch tlog.GetPublicKey().GetKeyDetails() { + case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, + protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, + protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512: + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, err + } + var ecKey *ecdsa.PublicKey + var ok bool + if ecKey, ok = key.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = ecKey + // This key format has public key in PKIX RSA format and PKCS1#1v1.5 or RSASSA-PSS signature + case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, + protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, + protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256: + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, err + } + var rsaKey *rsa.PublicKey + var ok bool + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = rsaKey + case protocommon.PublicKeyDetails_PKIX_ED25519: //nolint:staticcheck + key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, err + } + var edKey ed25519.PublicKey + var ok bool + if edKey, ok = key.(ed25519.PublicKey); !ok { + return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails()) + } + tlogEntry.PublicKey = edKey + // This key format is deprecated, but currently in use for Sigstore staging instance + case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck + key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes()) + if err != nil { + return nil, err + } + tlogEntry.PublicKey = key + default: + return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails()) + } + + tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey) + transparencyLogs[encodedKeyID] = tlogEntry + + if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil { + if validFor.GetStart() != nil { + transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime() + } else { + return nil, fmt.Errorf("tlog missing public key validity period start time") + } + if validFor.GetEnd() != nil { + transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime() + } + } else { + return nil, fmt.Errorf("tlog missing public key validity period") + } + } + return transparencyLogs, nil +} + +func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (certificateAuthorities []CertificateAuthority, err error) { + certificateAuthorities = make([]CertificateAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + certificateAuthority, err := ParseCertificateAuthority(certAuthority) + if err != nil { + return nil, err + } + certificateAuthorities[i] = certificateAuthority + } + return certificateAuthorities, 
nil +} + +func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (*FulcioCertificateAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + certificateAuthority := &FulcioCertificateAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, err + } + if i < chainLen-1 { + certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert) + } else { + certificateAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + certificateAuthority.ValidityPeriodStart = start.AsTime() + } + end := validFor.GetEnd() + if end != nil { + certificateAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + certificateAuthority.URI = certAuthority.Uri + + return certificateAuthority, nil +} + +func ParseTimestampingAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (timestampingAuthorities []TimestampingAuthority, err error) { + timestampingAuthorities = make([]TimestampingAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + timestampingAuthority, err := ParseTimestampingAuthority(certAuthority) + if err != nil { + return nil, err + } + timestampingAuthorities[i] = timestampingAuthority + } + return timestampingAuthorities, nil +} + +func ParseTimestampingAuthority(certAuthority *prototrustroot.CertificateAuthority) (TimestampingAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + timestampingAuthority := &SigstoreTimestampingAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, err + } + switch { + case i == 0 && !parsedCert.IsCA: + timestampingAuthority.Leaf = parsedCert + case i < chainLen-1: + timestampingAuthority.Intermediates = append(timestampingAuthority.Intermediates, parsedCert) + case i == chainLen-1: + timestampingAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + timestampingAuthority.ValidityPeriodStart = start.AsTime() + } + end := validFor.GetEnd() + if end != nil { + timestampingAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + timestampingAuthority.URI = certAuthority.Uri + + return timestampingAuthority, nil +} + +func NewTrustedRootFromPath(path string) (*TrustedRoot, error) { + trustedrootJSON, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + return NewTrustedRootFromJSON(trustedrootJSON) +} + +// NewTrustedRootFromJSON returns the Sigstore trusted root. 
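The loader family defined here can be exercised like this (sketch; the file path is a placeholder):

package example

import (
	"fmt"

	"github.com/sigstore/sigstore-go/pkg/root"
)

// listRekorLogs loads a trusted root from disk and prints the Rekor log
// instances it trusts, keyed by hex-encoded log key ID.
func listRekorLogs() error {
	tr, err := root.NewTrustedRootFromPath("trusted_root.json") // placeholder path
	if err != nil {
		return err
	}
	for keyID, tlog := range tr.RekorLogs() {
		fmt.Printf("%s -> %s\n", keyID, tlog.BaseURL)
	}
	return nil
}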
+func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) { + pbTrustedRoot, err := NewTrustedRootProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewTrustedRootFromProtobuf(pbTrustedRoot) +} + +// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf. +func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) { + pbTrustedRoot := &prototrustroot.TrustedRoot{} + err := protojson.Unmarshal(rootJSON, pbTrustedRoot) + if err != nil { + return nil, err + } + return pbTrustedRoot, nil +} + +// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio +// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor +// transparency log instances. +func NewTrustedRoot(mediaType string, + certificateAuthorities []CertificateAuthority, + certificateTransparencyLogs map[string]*TransparencyLog, + timestampAuthorities []TimestampingAuthority, + transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) { + // document that we assume 1 cert chain per target and with certs already ordered from leaf to root + if mediaType != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", TrustedRootMediaType01) + } + tr := &TrustedRoot{ + certificateAuthorities: certificateAuthorities, + ctLogs: certificateTransparencyLogs, + timestampingAuthorities: timestampAuthorities, + rekorLogs: transparencyLogs, + } + return tr, nil +} + +// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it. +func FetchTrustedRoot() (*TrustedRoot, error) { + return FetchTrustedRootWithOptions(tuf.DefaultOptions()) +} + +// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it. +func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, err + } + return GetTrustedRoot(client) +} + +// GetTrustedRoot returns the trusted root +func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) { + jsonBytes, err := c.GetTarget("trusted_root.json") + if err != nil { + return nil, err + } + return NewTrustedRootFromJSON(jsonBytes) +} + +func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash { + var h crypto.Hash + switch pk := pubKey.(type) { + case *rsa.PublicKey: + h = crypto.SHA256 + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + h = crypto.SHA256 + case elliptic.P384(): + h = crypto.SHA384 + case elliptic.P521(): + h = crypto.SHA512 + default: + h = crypto.SHA256 + } + case ed25519.PublicKey: + h = crypto.SHA512 + default: + h = crypto.SHA256 + } + return h +} + +// LiveTrustedRoot is a wrapper around TrustedRoot that periodically +// refreshes the trusted root from TUF. This is needed for long-running +// processes to ensure that the trusted root does not expire. +type LiveTrustedRoot struct { + *TrustedRoot + mu sync.RWMutex +} + +// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically +// refresh the trusted root from TUF. 
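Usage of the wrapper documented above is a one-liner (sketch; assumes the default public-good TUF options):

package example

import (
	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore-go/pkg/tuf"
)

// startLiveRoot begins a daily-refreshing trusted root for a long-running
// verifier process.
func startLiveRoot() (*root.LiveTrustedRoot, error) {
	return root.NewLiveTrustedRoot(tuf.DefaultOptions())
}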
+func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) { + client, err := tuf.New(opts) + if err != nil { + return nil, err + } + tr, err := GetTrustedRoot(client) + if err != nil { + return nil, err + } + ltr := &LiveTrustedRoot{ + TrustedRoot: tr, + mu: sync.RWMutex{}, + } + ticker := time.NewTicker(time.Hour * 24) + go func() { + for { + select { + case <-ticker.C: + client, err = tuf.New(opts) + if err != nil { + log.Printf("error creating TUF client: %v", err) + // skip this refresh and retry on the next tick + continue + } + newTr, err := GetTrustedRoot(client) + if err != nil { + log.Printf("error fetching trusted root: %v", err) + continue + } + ltr.mu.Lock() + ltr.TrustedRoot = newTr + ltr.mu.Unlock() + } + } + }() + return ltr, nil +} + +func (l *LiveTrustedRoot) TimestampingAuthorities() []TimestampingAuthority { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.TimestampingAuthorities() +} + +func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.FulcioCertificateAuthorities() +} + +func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.RekorLogs() +} + +func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.CTLogs() +} + +func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + l.mu.RLock() + defer l.mu.RUnlock() + return l.TrustedRoot.PublicKeyVerifier(keyID) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go new file mode 100644 index 00000000..d467c2bd --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go @@ -0,0 +1,270 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
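Because *TrustedRoot implements json.Marshaler via constructProtoTrustRoot below, serializing a trust root back to its JSON wire form is direct (sketch):

package example

import (
	"encoding/json"

	"github.com/sigstore/sigstore-go/pkg/root"
)

// dumpTrustedRoot re-serializes a TrustedRoot; json.Marshal dispatches to the
// custom MarshalJSON, which rebuilds the protobuf form before encoding.
func dumpTrustedRoot(tr *root.TrustedRoot) ([]byte, error) {
	return json.Marshal(tr)
}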
+ +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "fmt" + "sort" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +func (tr *TrustedRoot) constructProtoTrustRoot() error { + tr.trustedRoot = &prototrustroot.TrustedRoot{} + tr.trustedRoot.MediaType = TrustedRootMediaType01 + + for logID, transparencyLog := range tr.rekorLogs { + tlProto, err := transparencyLogToProtobufTL(transparencyLog) + if err != nil { + return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err) + } + tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto) + } + // ensure stable sorting of the slice + sortTlogSlice(tr.trustedRoot.Tlogs) + + for logID, ctLog := range tr.ctLogs { + ctProto, err := transparencyLogToProtobufTL(ctLog) + if err != nil { + return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err) + } + tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto) + } + // ensure stable sorting of the slice + sortTlogSlice(tr.trustedRoot.Ctlogs) + + for _, ca := range tr.certificateAuthorities { + caProto, err := certificateAuthorityToProtobufCA(ca.(*FulcioCertificateAuthority)) + if err != nil { + return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err) + } + tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto) + } + // ensure stable sorting of the slice + sortCASlice(tr.trustedRoot.CertificateAuthorities) + + for _, ca := range tr.timestampingAuthorities { + caProto, err := timestampingAuthorityToProtobufCA(ca.(*SigstoreTimestampingAuthority)) + if err != nil { + return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err) + } + tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto) + } + // ensure stable sorting of the slice + sortCASlice(tr.trustedRoot.TimestampAuthorities) + + return nil +} + +func sortCASlice(slc []*prototrustroot.CertificateAuthority) { + sort.Slice(slc, func(i, j int) bool { + iTime := time.Unix(0, 0) + jTime := time.Unix(0, 0) + + if slc[i].ValidFor.Start != nil { + iTime = slc[i].ValidFor.Start.AsTime() + } + if slc[j].ValidFor.Start != nil { + jTime = slc[j].ValidFor.Start.AsTime() + } + + return iTime.Before(jTime) + }) +} + +func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) { + sort.Slice(slc, func(i, j int) bool { + iTime := time.Unix(0, 0) + jTime := time.Unix(0, 0) + + if slc[i].PublicKey.ValidFor.Start != nil { + iTime = slc[i].PublicKey.ValidFor.Start.AsTime() + } + if slc[j].PublicKey.ValidFor.Start != nil { + jTime = slc[j].PublicKey.ValidFor.Start.AsTime() + } + + return iTime.Before(jTime) + }) +} + +func certificateAuthorityToProtobufCA(ca *FulcioCertificateAuthority) (*prototrustroot.CertificateAuthority, error) { + org := "" + if len(ca.Root.Subject.Organization) > 0 { + org = ca.Root.Subject.Organization[0] + } + var allCerts []*protocommon.X509Certificate + for _, intermed := range ca.Intermediates { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw}) + } + if ca.Root == nil { + return nil, fmt.Errorf("root certificate is nil") + } + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw}) + + caProto := prototrustroot.CertificateAuthority{ + Uri: ca.URI, + Subject: 
&protocommon.DistinguishedName{ + Organization: org, + CommonName: ca.Root.Subject.CommonName, + }, + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(ca.ValidityPeriodStart), + }, + CertChain: &protocommon.X509CertificateChain{ + Certificates: allCerts, + }, + } + + if !ca.ValidityPeriodEnd.IsZero() { + caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd) + } + + return &caProto, nil +} + +func timestampingAuthorityToProtobufCA(ca *SigstoreTimestampingAuthority) (*prototrustroot.CertificateAuthority, error) { + org := "" + if len(ca.Root.Subject.Organization) > 0 { + org = ca.Root.Subject.Organization[0] + } + var allCerts []*protocommon.X509Certificate + if ca.Leaf != nil { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw}) + } + for _, intermed := range ca.Intermediates { + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw}) + } + if ca.Root == nil { + return nil, fmt.Errorf("root certificate is nil") + } + allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw}) + + caProto := prototrustroot.CertificateAuthority{ + Uri: ca.URI, + Subject: &protocommon.DistinguishedName{ + Organization: org, + CommonName: ca.Root.Subject.CommonName, + }, + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(ca.ValidityPeriodStart), + }, + CertChain: &protocommon.X509CertificateChain{ + Certificates: allCerts, + }, + } + + if !ca.ValidityPeriodEnd.IsZero() { + caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd) + } + + return &caProto, nil +} + +func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) { + hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc) + if err != nil { + return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err) + } + publicKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd) + if err != nil { + return nil, fmt.Errorf("failed converting public key to protobuf: %w", err) + } + trProto := prototrustroot.TransparencyLogInstance{ + BaseUrl: tl.BaseURL, + HashAlgorithm: hashAlgo, + PublicKey: publicKey, + LogId: &protocommon.LogId{ + KeyId: tl.ID, + }, + } + + return &trProto, nil +} + +func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) { + switch hashAlgorithm { + case crypto.SHA256: + return protocommon.HashAlgorithm_SHA2_256, nil + case crypto.SHA384: + return protocommon.HashAlgorithm_SHA2_384, nil + case crypto.SHA512: + return protocommon.HashAlgorithm_SHA2_512, nil + case crypto.SHA3_256: + return protocommon.HashAlgorithm_SHA3_256, nil + case crypto.SHA3_384: + return protocommon.HashAlgorithm_SHA3_384, nil + default: + return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm) + } +} + +func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) { + pkd := protocommon.PublicKey{ + ValidFor: &protocommon.TimeRange{ + Start: timestamppb.New(start), + }, + } + + if !end.IsZero() { + pkd.ValidFor.End = timestamppb.New(end) + } + + rawBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return nil, fmt.Errorf("failed marshalling public key: %w", err) + } + pkd.RawBytes = rawBytes + + switch p := publicKey.(type) { + case *ecdsa.PublicKey: + switch p.Curve { + case elliptic.P256(): + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 + case elliptic.P384(): + 
pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 + case elliptic.P521(): + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 + default: + return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve) + } + case *rsa.PublicKey: + switch p.Size() * 8 { + case 2048: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 + case 3072: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 + case 4096: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 + default: + return nil, fmt.Errorf("unsupported public modulus for RSA key: %d", p.Size()) + } + case *ed25519.PublicKey: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519 + default: + return nil, fmt.Errorf("unknown public key type: %T", p) + } + + return &pkd, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go new file mode 100644 index 00000000..939e25f0 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go @@ -0,0 +1,302 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlog + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" + dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1" + hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1" + intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2" + rekorVerify "github.com/sigstore/rekor/pkg/verify" + "github.com/sigstore/sigstore/pkg/signature" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +type Entry struct { + kind string + version string + rekorEntry types.EntryImpl + logEntryAnon models.LogEntryAnon + signedEntryTimestamp []byte +} + +type RekorPayload struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` //nolint:tagliatelle +} + +var ErrNilValue = errors.New("validation error: nil value in transaction log entry") + +func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) { + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + rekorEntry, err := types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + + entry := &Entry{ + rekorEntry: rekorEntry, + logEntryAnon: models.LogEntryAnon{ + Body: 
base64.StdEncoding.EncodeToString(body), + IntegratedTime: swag.Int64(integratedTime), + LogIndex: swag.Int64(logIndex), + LogID: swag.String(string(logID)), + }, + kind: pe.Kind(), + version: rekorEntry.APIVersion(), + } + + if len(signedEntryTimestamp) > 0 { + entry.signedEntryTimestamp = signedEntryTimestamp + } + + if inclusionProof != nil { + entry.logEntryAnon.Verification = &models.LogEntryAnonVerification{ + InclusionProof: inclusionProof, + } + } + + return entry, nil +} + +// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl). +func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) { + if protoEntry == nil || + protoEntry.CanonicalizedBody == nil || + protoEntry.IntegratedTime == 0 || + protoEntry.LogIndex == 0 || + protoEntry.LogId == nil || + protoEntry.LogId.KeyId == nil || + protoEntry.KindVersion == nil { + return nil, ErrNilValue + } + + signedEntryTimestamp := []byte{} + if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp + } + + var inclusionProof *models.InclusionProof + + if protoEntry.InclusionProof != nil { + var hashes []string + + for _, v := range protoEntry.InclusionProof.Hashes { + hashes = append(hashes, hex.EncodeToString(v)) + } + + rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash) + + if protoEntry.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if protoEntry.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + + inclusionProof = &models.InclusionProof{ + LogIndex: swag.Int64(protoEntry.InclusionProof.LogIndex), + RootHash: &rootHash, + TreeSize: swag.Int64(protoEntry.InclusionProof.TreeSize), + Hashes: hashes, + Checkpoint: swag.String(protoEntry.InclusionProof.Checkpoint.Envelope), + } + } + + entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof) + if err != nil { + return nil, err + } + + if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version) + } + + return entry, nil +} + +func ValidateEntry(entry *Entry) error { + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + err := e.DSSEObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *hashedrekord_v001.V001Entry: + err := e.HashedRekordObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *intoto_v002.V002Entry: + err := e.IntotoObj.Validate(strfmt.Default) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported entry type: %T", e) + } + + return nil +} + +func (entry *Entry) IntegratedTime() time.Time { + return time.Unix(*entry.logEntryAnon.IntegratedTime, 0) +} + +func (entry *Entry) Signature() []byte { + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature) + if err != nil { + return []byte{} + } + return sigBytes + case *hashedrekord_v001.V001Entry: + return e.HashedRekordObj.Signature.Content + case *intoto_v002.V002Entry: + sigBytes, err := 
base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig)) + if err != nil { + return []byte{} + } + return sigBytes + } + + return []byte{} +} + +func (entry *Entry) PublicKey() any { + var pemString []byte + + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + pemString = []byte(*e.DSSEObj.Signatures[0].Verifier) + case *hashedrekord_v001.V001Entry: + pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content) + case *intoto_v002.V002Entry: + pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey) + } + + certBlock, _ := pem.Decode(pemString) + + var pk any + var err error + + pk, err = x509.ParseCertificate(certBlock.Bytes) + if err != nil { + pk, err = x509.ParsePKIXPublicKey(certBlock.Bytes) + if err != nil { + return nil + } + } + + return pk +} + +func (entry *Entry) LogKeyID() string { + return *entry.logEntryAnon.LogID +} + +func (entry *Entry) LogIndex() int64 { + return *entry.logEntryAnon.LogIndex +} + +func (entry *Entry) Body() any { + return entry.logEntryAnon.Body +} + +func (entry *Entry) HasInclusionPromise() bool { + return entry.signedEntryTimestamp != nil +} + +func (entry *Entry) HasInclusionProof() bool { + return entry.logEntryAnon.Verification != nil +} + +func VerifyInclusion(entry *Entry, verifier signature.Verifier) error { + err := rekorVerify.VerifyInclusion(context.TODO(), &entry.logEntryAnon) + if err != nil { + return err + } + + err = rekorVerify.VerifyCheckpointSignature(&entry.logEntryAnon, verifier) + if err != nil { + return err + } + + return nil +} + +func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error { + rekorPayload := RekorPayload{ + Body: entry.logEntryAnon.Body, + IntegratedTime: *entry.logEntryAnon.IntegratedTime, + LogIndex: *entry.logEntryAnon.LogIndex, + LogID: hex.EncodeToString([]byte(*entry.logEntryAnon.LogID)), + } + + verifier, ok := verifiers[hex.EncodeToString([]byte(*entry.logEntryAnon.LogID))] + if !ok { + return errors.New("rekor log public key not found for payload") + } + if verifier.ValidityPeriodStart.IsZero() { + return errors.New("rekor validity period start time not set") + } + if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) || + (!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) { + return errors.New("rekor log public key not valid at payload integrated time") + } + + contents, err := json.Marshal(rekorPayload) + if err != nil { + return fmt.Errorf("marshaling: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing: %w", err) + } + + hash := sha256.Sum256(canonicalized) + if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok { + return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey) + } else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) { + return errors.New("unable to verify SET") + } + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go new file mode 100644 index 00000000..134b2d73 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go @@ -0,0 +1,207 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
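Before the TUF client below, a minimal sketch of how a consumer might drive the tlog entry API above; the protoEntry, rekorVerifier, and rekorLogs names are illustrative, not part of this package:

	entry, err := tlog.ParseEntry(protoEntry) // *v1.TransparencyLogEntry taken from a bundle
	if err != nil {
		return err
	}
	if err := tlog.ValidateEntry(entry); err != nil {
		return err
	}
	if entry.HasInclusionProof() {
		// rekorVerifier is a signature.Verifier for the log's checkpoint key.
		err = tlog.VerifyInclusion(entry, rekorVerifier)
	} else if entry.HasInclusionPromise() {
		// rekorLogs is a map[string]*root.TransparencyLog keyed by hex log ID.
		err = tlog.VerifySET(entry, rekorLogs)
	}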
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata/config"
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+	"github.com/theupdateframework/go-tuf/v2/metadata/updater"
+
+	"github.com/sigstore/sigstore-go/pkg/util"
+)
+
+// Client is a Sigstore TUF client
+type Client struct {
+	cfg *config.UpdaterConfig
+	up *updater.Updater
+	opts *Options
+}
+
+// New returns a new client with custom options
+func New(opts *Options) (*Client, error) {
+	var c = Client{
+		opts: opts,
+	}
+	dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL))
+	var err error
+
+	if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil {
+		return nil, fmt.Errorf("failed to create TUF client: %w", err)
+	}
+
+	c.cfg.LocalMetadataDir = dir
+	c.cfg.LocalTargetsDir = filepath.Join(dir, "targets")
+	c.cfg.DisableLocalCache = c.opts.DisableLocalCache
+	c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot
+
+	if c.cfg.DisableLocalCache {
+		c.opts.CachePath = ""
+		c.opts.CacheValidity = 0
+		c.opts.ForceCache = false
+	}
+
+	if opts.Fetcher != nil {
+		c.cfg.Fetcher = opts.Fetcher
+	} else {
+		fetcher := fetcher.DefaultFetcher{}
+		fetcher.SetHTTPUserAgent(util.ConstructUserAgent())
+		c.cfg.Fetcher = &fetcher
+	}
+
+	// Upon client creation, we may not perform a full TUF update,
+	// based on the cache control configuration. Start with a local
+	// client (only reads content on disk) and then decide if we
+	// must perform a full TUF update.
+	tmpCfg := *c.cfg
+	// Create a temporary config for the first use where UnsafeLocalMode
+	// is true. This means that when we first initialize the client,
+	// we are guaranteed to only read the metadata on disk.
+	// Based on that metadata we decide whether a full TUF
+	// refresh should be done or not. As such, the tmpCfg is only needed
+	// here and not in future invocations.
+	tmpCfg.UnsafeLocalMode = true
+	c.up, err = updater.New(&tmpCfg)
+	if err != nil {
+		return nil, err
+	}
+	if err = c.loadMetadata(); err != nil {
+		return nil, err
+	}
+
+	return &c, nil
+}
+
+// DefaultClient returns a Sigstore TUF client for the public good instance
+func DefaultClient() (*Client, error) {
+	opts := DefaultOptions()
+
+	return New(opts)
+}
+
+// loadMetadata controls if the client actually should perform a TUF refresh.
+// The TUF specification mandates so, but for certain Sigstore clients it
+// may be beneficial to rely on the cache, and in air-gapped deployments
+// it may not even be possible.
+func (c *Client) loadMetadata() error {
+	// Load the metadata into memory and verify it
+	if err := c.up.Refresh(); err != nil {
+		// this is most likely due to the lack of metadata files
+		// on disk. Perform a full update and return.
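+		// (On a first run with an empty cache directory this local
+		// refresh fails, so the full refresh below seeds the cache.)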
+		return c.Refresh()
+	}
+
+	if c.opts.ForceCache {
+		return nil
+	} else if c.opts.CacheValidity > 0 {
+		cfg, err := LoadConfig(c.configPath())
+		if err != nil {
+			// Config may not exist; don't error,
+			// create a new empty config
+			cfg = &Config{}
+		}
+
+		cacheValidUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
+		if time.Now().Before(cacheValidUntil) {
+			// No need to update
+			return nil
+		}
+	}
+
+	return c.Refresh()
+}
+
+func (c *Client) configPath() string {
+	var p = filepath.Join(
+		c.opts.CachePath,
+		fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL)),
+	)
+
+	return p
+}
+
+// Refresh forces a refresh of the underlying TUF client.
+// As the TUF client updater does not support multiple refreshes during
+// its lifetime, this will replace the TUF client updater with a new one.
+func (c *Client) Refresh() error {
+	var err error
+
+	c.up, err = updater.New(c.cfg)
+	if err != nil {
+		return fmt.Errorf("failed to create tuf updater: %w", err)
+	}
+	err = c.up.Refresh()
+	if err != nil {
+		return fmt.Errorf("tuf refresh failed: %w", err)
+	}
+
+	// Update config with last update
+	cfg, err := LoadConfig(c.configPath())
+	if err != nil {
+		// Likely the config file did not exist; create it
+		cfg = &Config{}
+	}
+	cfg.LastTimestamp = time.Now()
+	// ignore error writing updated config file
+	_ = cfg.Persist(c.configPath())
+
+	return nil
+}
+
+// GetTarget returns a target file from the TUF repository
+func (c *Client) GetTarget(target string) ([]byte, error) {
+	// Set filepath to the empty string. When we get targets,
+	// we rely on the target info struct instead.
+	const filePath = ""
+	ti, err := c.up.GetTargetInfo(target)
+	if err != nil {
+		return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
+	}
+
+	path, tb, err := c.up.FindCachedTarget(ti, filePath)
+	if err != nil {
+		return nil, fmt.Errorf("getting target cache: %w", err)
+	}
+	if path != "" {
+		// Cached version found
+		return tb, nil
+	}
+
+	// A download of the target is needed.
+	// Ignore targetsBaseURL, set to empty string
+	const targetsBaseURL = ""
+	_, tb, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
+	}
+
+	return tb, nil
+}
+
+// URLToPath converts a URL to a filename-compatible string
+func URLToPath(url string) string {
+	// Strip the scheme and replace slashes with dashes, e.g.
+	// https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root
+	fn := url
+	if len(fn) > 8 && fn[:8] == "https://" {
+		fn = fn[8:]
+	}
+	fn = strings.ReplaceAll(fn, "/", "-")
+	return fn
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
new file mode 100644
index 00000000..3f5a81f1
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
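For orientation, a minimal sketch of the intended call pattern for this client; the target name and error handling here are illustrative, not fixed by this package:

	// Refresh at most once a day, then fetch a target by name.
	opts := tuf.DefaultOptions().WithCacheValidity(1)
	client, err := tuf.New(opts)
	if err != nil {
		log.Fatal(err)
	}
	data, err := client.GetTarget("trusted_root.json") // illustrative target name
	if err != nil {
		log.Fatal(err)
	}
	_ = data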
+
+package tuf
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+)
+
+type Config struct {
+	LastTimestamp time.Time `json:"lastTimestamp"`
+}
+
+func LoadConfig(p string) (*Config, error) {
+	var c Config
+
+	b, err := os.ReadFile(p)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read config: %w", err)
+	}
+	err = json.Unmarshal(b, &c)
+	if err != nil {
+		return nil, fmt.Errorf("malformed config file: %w", err)
+	}
+
+	return &c, nil
+}
+
+func (c *Config) Persist(p string) error {
+	b, err := json.Marshal(c)
+	if err != nil {
+		return fmt.Errorf("failed to JSON marshal config: %w", err)
+	}
+	err = os.WriteFile(p, b, 0600)
+	if err != nil {
+		return fmt.Errorf("failed to write config: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
new file mode 100644
index 00000000..14b5ae89
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
@@ -0,0 +1,173 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+	"embed"
+	"math"
+	"os"
+	"path/filepath"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+//go:embed repository
+var embeddedRepo embed.FS
+
+const (
+	DefaultMirror = "https://tuf-repo-cdn.sigstore.dev"
+	StagingMirror = "https://tuf-repo-cdn.sigstage.dev"
+
+	// The following caching values can be used for the CacheValidity option
+	NoCache = 0
+	MaxCache = math.MaxInt
+)
+
+// Options represent the various options for a Sigstore TUF Client
+type Options struct {
+	// CacheValidity period in days (default 0). The client will persist a
+	// timestamp with the cache after refresh. Note that the client will
+	// always refresh the cache if the metadata is expired or if the client is
+	// unable to find a persisted timestamp, so this is not an optimal control
+	// for air-gapped environments. Use const MaxCache to update the cache when
+	// the metadata is expired, though the first initialization will still
+	// refresh the cache.
+	CacheValidity int
+	// ForceCache controls if the cache should be used without update
+	// as long as the metadata is valid. Use ForceCache over CacheValidity
+	// if you want to always use the cache up until its expiration. Note that
+	// the client will refresh the cache once the metadata has expired, so this
+	// is not an optimal control for air-gapped environments. Clients instead
+	// should provide a trust root file directly to the client to bypass TUF.
+	ForceCache bool
+	// Root is the TUF trust anchor
+	Root []byte
+	// CachePath is the location on disk for TUF cache
+	// (default $HOME/.sigstore/root)
+	CachePath string
+	// RepositoryBaseURL is the TUF repository location URL
+	// (default https://tuf-repo-cdn.sigstore.dev)
+	RepositoryBaseURL string
+	// DisableLocalCache mode allows a client to work on a read-only
+	// file system. If this is set, the cache path is ignored.
+ DisableLocalCache bool + // DisableConsistentSnapshot + DisableConsistentSnapshot bool + // Fetcher is the metadata fetcher + Fetcher fetcher.Fetcher +} + +// WithCacheValidity sets the cache validity period in days +func (o *Options) WithCacheValidity(days int) *Options { + o.CacheValidity = days + return o +} + +// WithForceCache forces the client to use the cache without updating +func (o *Options) WithForceCache() *Options { + o.ForceCache = true + return o +} + +// WithRoot sets the TUF trust anchor +func (o *Options) WithRoot(root []byte) *Options { + o.Root = root + return o +} + +// WithCachePath sets the location on disk for TUF cache +func (o *Options) WithCachePath(path string) *Options { + o.CachePath = path + return o +} + +// WithRepositoryBaseURL sets the TUF repository location URL +func (o *Options) WithRepositoryBaseURL(url string) *Options { + o.RepositoryBaseURL = url + return o +} + +// WithDisableLocalCache sets the client to work on a read-only file system +func (o *Options) WithDisableLocalCache() *Options { + o.DisableLocalCache = true + return o +} + +// WithDisableConsistentSnapshot sets the client to disable consistent snapshot +func (o *Options) WithDisableConsistentSnapshot() *Options { + o.DisableConsistentSnapshot = true + return o +} + +// WithFetcher sets the metadata fetcher +func (o *Options) WithFetcher(f fetcher.Fetcher) *Options { + o.Fetcher = f + return o +} + +// DefaultOptions returns an options struct for the public good instance +func DefaultOptions() *Options { + var opts Options + var err error + + opts.Root = DefaultRoot() + home, err := os.UserHomeDir() + if err != nil { + // Fall back to using a TUF repository in the temp location + home = os.TempDir() + } + opts.CachePath = filepath.Join(home, ".sigstore", "root") + opts.RepositoryBaseURL = DefaultMirror + + return &opts +} + +// DefaultRoot returns the root.json for the public good instance +func DefaultRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. + // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. + panic(err) + } + + return b +} + +// StagingRoot returns the root.json for the staging instance +func StagingRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/staging_root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. + // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. 
+ panic(err) + } + + return b +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json new file mode 100644 index 00000000..451d4143 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json @@ -0,0 +1,165 @@ +{ + "signatures": [ + { + "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "sig": "30460221008ab1f6f17d4f9e6d7dcf1c88912b6b53cc10388644ae1f09bc37a082cd06003e022100e145ef4c7b782d4e8107b53437e669d0476892ce999903ae33d14448366996e7" + }, + { + "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "sig": "3045022100c768b2f86da99569019c160a081da54ae36c34c0a3120d3cb69b53b7d113758e02204f671518f617b20d46537fae6c3b63bae8913f4f1962156105cc4f019ac35c6a" + }, + { + "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "sig": "3045022100b4434e6995d368d23e74759acd0cb9013c83a5d3511f0f997ec54c456ae4350a022015b0e265d182d2b61dc74e155d98b3c3fbe564ba05286aa14c8df02c9b756516" + }, + { + "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "sig": "304502210082c58411d989eb9f861410857d42381590ec9424dbdaa51e78ed13515431904e0220118185da6a6c2947131c17797e2bb7620ce26e5f301d1ceac5f2a7e58f9dcf2e" + }, + { + "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "sig": "3046022100c78513854cae9c32eaa6b88e18912f48006c2757a258f917312caba75948eb9e022100d9e1b4ce0adfe9fd2e2148d7fa27a2f40ba1122bd69da7612d8d1776b013c91d" + }, + { + "keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f", + "sig": "3045022056483a2d5d9ea9cec6e11eadfb33c484b614298faca15acf1c431b11ed7f734c022100d0c1d726af92a87e4e66459ca5adf38a05b44e1f94318423f954bae8bca5bb2e" + }, + { + "keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523", + "sig": "3046022100d004de88024c32dc5653a9f4843cfc5215427048ad9600d2cf9c969e6edff3d2022100d9ebb798f5fc66af10899dece014a8628ccf3c5402cd4a4270207472f8f6e712" + }, + { + "keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e", + "sig": "3046022100b7b09996c45ca2d4b05603e56baefa29718a0b71147cf8c6e66349baa61477df022100c4da80c717b4fa7bba0fd5c72da8a0499358b01358b2309f41d1456ea1e7e1d9" + }, + { + "keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e", + "sig": "3046022100be9782c30744e411a82fa85b5138d601ce148bc19258aec64e7ec24478f38812022100caef63dcaf1a4b9a500d3bd0e3f164ec18f1b63d7a9460d9acab1066db0f016d" + }, + { + "keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849", + "sig": "30450220746ec3f8534ce55531d0d01ff64964ef440d1e7d2c4c142409b8e9769f1ada6f022100e3b929fcd93ea18feaa0825887a7210489879a66780c07a83f4bd46e2f09ab3b" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-02-19T08:04:32Z", + "keys": { + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@santiagotorres" + }, + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC 
KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@bobcallaway" + }, + "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@dlorenc" + }, + "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms://projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp" + }, + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + } + }, + "roles": { + "root": { + "keyids": [ + "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70" + ], + "threshold": 3 + }, + "snapshot": { + "keyids": [ + "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70" + ], + "threshold": 3 + }, + "timestamp": { + "keyids": [ + "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 4 + } + }, + "spec_version": "1.0", + "version": 10, + "x-tuf-on-ci-expiry-period": 182, + "x-tuf-on-ci-signing-period": 31 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json 
b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json new file mode 100644 index 00000000..0dcde90a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json @@ -0,0 +1,107 @@ +{ + "signatures": [ + { + "keyid": "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "sig": "304502204d5d01c2ae4b846cc6d29d7c5676f5d99ea464a69bd464fef16a5d0cdd4a616d022100bf73b2b11b68bf7a7047480bf0d5961a3a40c524f64a82e2c90f59d4083e498e" + }, + { + "keyid": "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "sig": "3044022005a8e904d484b7f4c3bac53ed6babeee303f6308f81f9ea29a7a1f6ad51068c20220641303f1e5ab14b151525c63ca95b35df64ffc905c8883f96cbee703ed45a2df" + }, + { + "keyid": "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "sig": "" + }, + { + "keyid": "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5", + "sig": "" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-03-07T07:44:40Z", + "keys": { + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoxkvDOmtGEknB3M+ZkPts8joDM0X\nIH5JZwPlgC2CXs/eqOuNF8AcEWwGYRiDhV/IMlQw5bg8PLICQcgsbrDiKg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + }, + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE++Wv+DcLRk+mfkmlpCwl1GUi9EMh\npBUTz8K0fH7bE4mQuViGSyWA/eyMc0HvzZi6Xr0diHw0/lUPBvok214YQw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@kommendorkapten" + }, + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFHDb85JH+JYR1LQmxiz4UMokVMnP\nxKoWpaEnFCKXH8W4Fc/DfIxMnkpjCuvWUBdJXkO0aDIxwsij8TOFh2R7dw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEohqIdE+yTl4OxpX8ZxNUPrg3SL9H\nBDnhZuceKkxy2oMhUOxhWweZeG3bfM1T4ZLnJimC6CAYVU5+F5jZCoftRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@jku" + }, + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExxmEtmhF5U+i+v/6he4BcSLzCgMx\n/0qSrvDg6bUWwUrkSKS2vDpcJrhGy5fmmhRrGawjPp1ALpC3y1kqFTpXDg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/projectsigstore-staging/locations/global/keyRings/tuf-keyring/cryptoKeys/tuf-key/cryptoKeyVersions/2" + } + }, + "roles": { + "root": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" 
+ ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 1 + }, + "timestamp": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 10, + "x-tuf-on-ci-expiry-period": 182, + "x-tuf-on-ci-signing-period": 35 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go new file mode 100644 index 00000000..4ab68528 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "runtime/debug" +) + +func ConstructUserAgent() string { + userAgent := "sigstore-go" + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return userAgent + } + + for _, eachDep := range buildInfo.Deps { + if eachDep.Path == "github.com/sigstore/sigstore-go" { + userAgent += "/" + userAgent += eachDep.Version + } + } + + return userAgent +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go new file mode 100644 index 00000000..e33d915a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) ([][]*x509.Certificate, error) { // nolint: revive + for _, ca := range trustedMaterial.FulcioCertificateAuthorities() { + chains, err := ca.Verify(leafCert, observerTimestamp) + if err == nil { + return chains, nil + } + } + + return nil, errors.New("leaf certificate verification failed") +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go new file mode 100644 index 00000000..68fa7ad6 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" +) + +type SubjectAlternativeNameMatcher struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type IssuerMatcher struct { + Issuer string `json:"issuer"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type CertificateIdentity struct { + SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"` + Issuer IssuerMatcher `json:"issuer"` + certificate.Extensions +} + +type CertificateIdentities []CertificateIdentity + +type ErrValueMismatch struct { + object string + expected string + actual string +} + +func (e *ErrValueMismatch) Error() string { + return fmt.Sprintf("expected %s value \"%s\", got \"%s\"", e.object, e.expected, e.actual) +} + +type ErrValueRegexMismatch struct { + object string + regex string + value string +} + +func (e *ErrValueRegexMismatch) Error() string { + return fmt.Sprintf("expected %s value to match regex \"%s\", got \"%s\"", e.object, e.regex, e.value) +} + +type ErrNoMatchingCertificateIdentity struct { + errors []error +} + +func (e *ErrNoMatchingCertificateIdentity) Error() string { + if len(e.errors) > 0 { + return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1]) + } + return "no matching CertificateIdentity found" +} + +func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error { + return e.errors +} + +// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher. +// If the regexpStr fails to compile into a Regexp, an error is returned. +func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) { + r, err := regexp.Compile(regexpStr) + if err != nil { + return SubjectAlternativeNameMatcher{}, err + } + + return SubjectAlternativeNameMatcher{ + SubjectAlternativeName: sanValue, + Regexp: *r}, nil +} + +// The default Regexp json marshal is quite ugly, so we override it here. 
+func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		SubjectAlternativeName string `json:"subjectAlternativeName"`
+		Regexp string `json:"regexp,omitempty"`
+	}{
+		SubjectAlternativeName: s.SubjectAlternativeName,
+		Regexp: s.Regexp.String(),
+	})
+}
+
+// Verify checks if the actualCert matches the SANMatcher's Value and
+// Regexp – if those values have been provided.
+func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error {
+	if s.SubjectAlternativeName != "" &&
+		actualCert.SubjectAlternativeName != s.SubjectAlternativeName {
+		return &ErrValueMismatch{"SAN", string(s.SubjectAlternativeName), string(actualCert.SubjectAlternativeName)}
+	}
+
+	if s.Regexp.String() != "" &&
+		!s.Regexp.MatchString(actualCert.SubjectAlternativeName) {
+		return &ErrValueRegexMismatch{"SAN", string(s.Regexp.String()), string(actualCert.SubjectAlternativeName)}
+	}
+	return nil
+}
+
+func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) {
+	r, err := regexp.Compile(regexpStr)
+	if err != nil {
+		return IssuerMatcher{}, err
+	}
+
+	return IssuerMatcher{Issuer: issuerValue, Regexp: *r}, nil
+}
+
+func (i *IssuerMatcher) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		Issuer string `json:"issuer"`
+		Regexp string `json:"regexp,omitempty"`
+	}{
+		Issuer: i.Issuer,
+		Regexp: i.Regexp.String(),
+	})
+}
+
+func (i IssuerMatcher) Verify(actualCert certificate.Summary) error {
+	if i.Issuer != "" &&
+		actualCert.Extensions.Issuer != i.Issuer {
+		return &ErrValueMismatch{"issuer", string(i.Issuer), string(actualCert.Extensions.Issuer)}
+	}
+
+	if i.Regexp.String() != "" &&
+		!i.Regexp.MatchString(actualCert.Extensions.Issuer) {
+		return &ErrValueRegexMismatch{"issuer", string(i.Regexp.String()), string(actualCert.Extensions.Issuer)}
+	}
+	return nil
+}
+
+func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) {
+	if sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria")
+	}
+
+	if issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, must specify Issuer criteria")
+	}
+
+	if extensions.Issuer != "" {
+		return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions")
+	}
+
+	certID := CertificateIdentity{
+		SubjectAlternativeName: sanMatcher,
+		Issuer: issuerMatcher,
+		Extensions: extensions,
+	}
+
+	return certID, nil
+}
+
+// NewShortCertificateIdentity provides a more convenient way of initializing
+// a CertificateIdentity with a SAN and the Issuer OID extension. If you need
+// to check more OID extensions, use NewCertificateIdentity instead.
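+// For example (all values here are illustrative):
+//
+//	ci, err := NewShortCertificateIdentity(
+//		"https://token.actions.githubusercontent.com", "", // exact issuer match
+//		"", "^https://github.com/acme/widget/",            // SAN matched by regexp
+//	)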
+func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) { + sanMatcher, err := NewSANMatcher(sanValue, sanRegex) + if err != nil { + return CertificateIdentity{}, err + } + + issuerMatcher, err := NewIssuerMatcher(issuer, issuerRegex) + if err != nil { + return CertificateIdentity{}, err + } + + return NewCertificateIdentity(sanMatcher, issuerMatcher, certificate.Extensions{}) +} + +// Verify verifies the CertificateIdentities, and if ANY of them match the cert, +// it returns the CertificateIdentity that matched. If none match, it returns an +// error. +func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) { + multierr := &ErrNoMatchingCertificateIdentity{} + var err error + for _, ci := range i { + if err = ci.Verify(cert); err == nil { + return &ci, nil + } + multierr.errors = append(multierr.errors, err) + } + return nil, multierr +} + +// Verify checks if the actualCert matches the CertificateIdentity's SAN and +// any of the provided OID extension values. Any empty values are ignored. +func (c CertificateIdentity) Verify(actualCert certificate.Summary) error { + var err error + if err = c.SubjectAlternativeName.Verify(actualCert); err != nil { + return err + } + + if err = c.Issuer.Verify(actualCert); err != nil { + return err + } + + return certificate.CompareExtensions(c.Extensions, actualCert.Extensions) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go new file mode 100644 index 00000000..18263c09 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" +) + +type ErrVerification struct { + err error +} + +func NewVerificationError(e error) ErrVerification { + return ErrVerification{e} +} + +func (e ErrVerification) Unwrap() error { + return e.err +} + +func (e ErrVerification) String() string { + return fmt.Sprintf("verification error: %s", e.err.Error()) +} + +func (e ErrVerification) Error() string { + return e.String() +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go new file mode 100644 index 00000000..2ff08034 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go @@ -0,0 +1,121 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "crypto/x509" + "errors" + "time" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" +) + +var errNotImplemented = errors.New("not implemented") + +type HasInclusionPromise interface { + HasInclusionPromise() bool +} + +type HasInclusionProof interface { + HasInclusionProof() bool +} + +type SignatureProvider interface { + SignatureContent() (SignatureContent, error) +} + +type SignedTimestampProvider interface { + Timestamps() ([][]byte, error) +} + +type TlogEntryProvider interface { + TlogEntries() ([]*tlog.Entry, error) +} + +type VerificationProvider interface { + VerificationContent() (VerificationContent, error) +} + +type SignedEntity interface { + HasInclusionPromise + HasInclusionProof + SignatureProvider + SignedTimestampProvider + TlogEntryProvider + VerificationProvider +} + +type VerificationContent interface { + CompareKey(any, root.TrustedMaterial) bool + ValidAtTime(time.Time, root.TrustedMaterial) bool + Certificate() *x509.Certificate + PublicKey() PublicKeyProvider +} + +type SignatureContent interface { + Signature() []byte + EnvelopeContent() EnvelopeContent + MessageSignatureContent() MessageSignatureContent +} + +type PublicKeyProvider interface { + Hint() string +} + +type MessageSignatureContent interface { + Digest() []byte + DigestAlgorithm() string + Signature() []byte +} + +type EnvelopeContent interface { + RawEnvelope() *dsse.Envelope + Statement() (*in_toto.Statement, error) +} + +// BaseSignedEntity is a helper struct that implements all the interfaces +// of SignedEntity. It can be embedded in a struct to implement the SignedEntity +// interface. This may be useful for testing, or for implementing a SignedEntity +// that only implements a subset of the interfaces. +type BaseSignedEntity struct{} + +var _ SignedEntity = &BaseSignedEntity{} + +func (b *BaseSignedEntity) HasInclusionPromise() bool { + return false +} + +func (b *BaseSignedEntity) HasInclusionProof() bool { + return false +} + +func (b *BaseSignedEntity) VerificationContent() (VerificationContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) SignatureContent() (SignatureContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Timestamps() ([][]byte, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) { + return nil, errNotImplemented +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go new file mode 100644 index 00000000..bf447c28 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go @@ -0,0 +1,100 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
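The BaseSignedEntity embedding pattern described in interface.go above might look like this sketch; the type and field names are made up for illustration:

	// A test double that only supplies a signature; every other
	// SignedEntity method falls through to BaseSignedEntity's
	// not-implemented defaults.
	type stubEntity struct {
		verify.BaseSignedEntity
		sig verify.SignatureContent
	}

	func (s *stubEntity) SignatureContent() (verify.SignatureContent, error) {
		return s.sig, nil
	}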
+ +package verify + +import ( + "crypto/x509" + "encoding/hex" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/ctutil" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" + "github.com/sigstore/sigstore-go/pkg/root" +) + +// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and a +// leaf certificate, will extract SCTs from the leaf certificate and verify the +// timestamps using the TrustedMaterial's FulcioCertificateAuthorities() and +// CTLogs() +func VerifySignedCertificateTimestamp(chains [][]*x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive + if len(chains) == 0 || len(chains[0]) == 0 || chains[0][0] == nil { + return errors.New("no chains provided") + } + // The first certificate in the chain is always the leaf certificate + leaf := chains[0][0] + + ctlogs := trustedMaterial.CTLogs() + + scts, err := x509util.ParseSCTsFromCertificate(leaf.Raw) + if err != nil { + return err + } + + leafCTCert, err := ctx509.ParseCertificates(leaf.Raw) + if err != nil { + return err + } + + verified := 0 + for _, sct := range scts { + encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:]) + key, ok := ctlogs[encodedKeyID] + if !ok { + // skip entries the trust root cannot verify + continue + } + + // Ensure sct is within ctlog validity window + sctTime := ct.TimestampToTime(sct.Timestamp) + if !key.ValidityPeriodStart.IsZero() && sctTime.Before(key.ValidityPeriodStart) { + // skip entries that were before ctlog key start time + continue + } + if !key.ValidityPeriodEnd.IsZero() && sctTime.After(key.ValidityPeriodEnd) { + // skip entries that were after ctlog key end time + continue + } + + for _, chain := range chains { + fulcioChain := make([]*ctx509.Certificate, len(leafCTCert)) + copy(fulcioChain, leafCTCert) + + if len(chain) < 2 { + continue + } + parentCert := chain[1].Raw + + fulcioIssuer, err := ctx509.ParseCertificates(parentCert) + if err != nil { + continue + } + fulcioChain = append(fulcioChain, fulcioIssuer...) + + err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true) + if err == nil { + verified++ + } + } + } + + if verified < threshold { + return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go new file mode 100644 index 00000000..1d96c121 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go @@ -0,0 +1,271 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
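Taken together with certificate.go, the SCT check above slots in right after chain building. A hedged sketch, with acquisition of the trusted material not shown:

	chains, err := verify.VerifyLeafCertificate(observerTime, leafCert, trustedMaterial)
	if err != nil {
		return err
	}
	// Require at least one verifiable SCT from a known CT log.
	if err := verify.VerifySignedCertificateTimestamp(chains, 1, trustedMaterial); err != nil {
		return err
	}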
+
+package verify
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	in_toto "github.com/in-toto/attestation/go/v1"
+	"github.com/secure-systems-lab/go-securesystemslib/dsse"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/signature"
+	sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
+	"github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const maxAllowedSubjects = 1024
+const maxAllowedSubjectDigests = 32
+
+var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required")
+
+func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelope(verifier, envelope)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return errors.New("artifact must be provided to verify message signature")
+	}
+
+	// handle an invalid signature content message
+	return fmt.Errorf("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifact(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifact io.Reader) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelopeWithArtifact(verifier, envelope, artifact)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return verifyMessageSignature(verifier, msg, artifact)
+	}
+
+	// handle an invalid signature content message
+	return fmt.Errorf("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifactDigest(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifactDigest []byte, artifactDigestAlgorithm string) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelopeWithArtifactDigest(verifier, envelope, artifactDigest, artifactDigestAlgorithm)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return verifyMessageSignatureWithArtifactDigest(verifier, msg, artifactDigest)
+	}
+
+	// handle an invalid signature content message
+	return fmt.Errorf("signature content has neither an envelope nor a message")
+}
+
+func getSignatureVerifier(verificationContent VerificationContent, tm root.TrustedMaterial) (signature.Verifier, error) {
+	if leafCert := verificationContent.Certificate(); leafCert != nil {
+		// TODO: Inspect certificate's SignatureAlgorithm to determine hash function
+		return signature.LoadVerifier(leafCert.PublicKey, crypto.SHA256)
+	} else if pk := verificationContent.PublicKey(); pk != nil {
+		return tm.PublicKeyVerifier(pk.Hint())
+	}
+
+	return nil, fmt.Errorf("no public key or certificate found")
+}
+
+func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error {
+	dsseEnv := envelope.RawEnvelope()
+
+	// A DSSE envelope in a Sigstore bundle MUST only contain one
+	// signature, even though DSSE is more permissive.
+	if len(dsseEnv.Signatures) != 1 {
+		return ErrDSSEInvalidSignatureCount
+	}
+	pub, err := verifier.PublicKey()
+	if err != nil {
+		return fmt.Errorf("could not fetch verifier public key: %w", err)
+	}
+	envVerifier, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{
+		SignatureVerifier: verifier,
+		Pub: pub,
+	})
+
+	if err != nil {
+		return fmt.Errorf("could not load envelope verifier: %w", err)
+	}
+
+	_, err = envVerifier.Verify(context.TODO(), dsseEnv)
+	if err != nil {
+		return fmt.Errorf("could not verify envelope: %w", err)
+	}
+
+	return nil
+}
+
+func verifyEnvelopeWithArtifact(verifier signature.Verifier, envelope EnvelopeContent, artifact io.Reader) error {
+	err := verifyEnvelope(verifier, envelope)
+	if err != nil {
+		return err
+	}
+	statement, err := envelope.Statement()
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+	}
+	if err = limitSubjects(statement); err != nil {
+		return err
+	}
+
+	var artifactDigestAlgorithm string
+	var artifactDigest []byte
+
+	// Determine artifact digest algorithm by looking at the first subject's
+	// digests. This assumes that if a statement contains multiple subjects,
+	// they all use the same digest algorithm(s).
+	if len(statement.Subject) == 0 {
+		return errors.New("no subjects found in statement")
+	}
+	if len(statement.Subject[0].Digest) == 0 {
+		return errors.New("no digests found in statement")
+	}
+
+	// Select the strongest digest algorithm available.
+	for _, alg := range []string{"sha512", "sha384", "sha256"} {
+		if _, ok := statement.Subject[0].Digest[alg]; ok {
+			artifactDigestAlgorithm = alg
+			break
+		}
+	}
+	if artifactDigestAlgorithm == "" {
+		return errors.New("could not verify artifact: unsupported digest algorithm")
+	}
+
+	// Compute digest of the artifact.
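+	// (artifactDigestAlgorithm was validated above, so the switch below
+	// always selects one of the three hashers.)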
+	var hasher hash.Hash
+	switch artifactDigestAlgorithm {
+	case "sha512":
+		hasher = crypto.SHA512.New()
+	case "sha384":
+		hasher = crypto.SHA384.New()
+	case "sha256":
+		hasher = crypto.SHA256.New()
+	}
+	_, err = io.Copy(hasher, artifact)
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err)
+	}
+	artifactDigest = hasher.Sum(nil)
+
+	// Look for artifact digest in statement
+	for _, subject := range statement.Subject {
+		for alg, digest := range subject.Digest {
+			hexdigest, err := hex.DecodeString(digest)
+			if err != nil {
+				return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err)
+			}
+			if alg == artifactDigestAlgorithm && bytes.Equal(artifactDigest, hexdigest) {
+				return nil
+			}
+		}
+	}
+	return errors.New("could not verify artifact: artifact digest is not present in subject digests")
+}
+
+func verifyEnvelopeWithArtifactDigest(verifier signature.Verifier, envelope EnvelopeContent, artifactDigest []byte, artifactDigestAlgorithm string) error {
+	err := verifyEnvelope(verifier, envelope)
+	if err != nil {
+		return err
+	}
+	statement, err := envelope.Statement()
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+	}
+	if err = limitSubjects(statement); err != nil {
+		return err
+	}
+
+	for _, subject := range statement.Subject {
+		for alg, digest := range subject.Digest {
+			if alg == artifactDigestAlgorithm {
+				hexdigest, err := hex.DecodeString(digest)
+				if err != nil {
+					return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err)
+				}
+				if bytes.Equal(hexdigest, artifactDigest) {
+					return nil
+				}
+			}
+		}
+	}
+	return errors.New("provided artifact digest does not match any digest in statement")
+}
+
+func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error {
+	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact)
+	if err != nil {
+		return fmt.Errorf("could not verify message: %w", err)
+	}
+
+	return nil
+}
+
+func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error {
+	if !bytes.Equal(artifactDigest, msg.Digest()) {
+		return errors.New("artifact does not match digest")
+	}
+	if _, ok := verifier.(*signature.ED25519Verifier); ok {
+		return errors.New("message signatures with ed25519 signatures can only be verified with artifacts, and not just their digest")
+	}
+	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest))
+
+	if err != nil {
+		return fmt.Errorf("could not verify message: %w", err)
+	}
+
+	return nil
+}
+
+// limitSubjects limits the number of subjects and digests in a statement to prevent DoS.
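+// With the limits below this bounds the work at 1024 subjects with up to
+// 32 digests each per statement.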
+
+// limitSubjects limits the number of subjects and digests in a statement to prevent DoS.
+func limitSubjects(statement *in_toto.Statement) error {
+	if len(statement.Subject) > maxAllowedSubjects {
+		return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects)
+	}
+	for _, subject := range statement.Subject {
+		// limit the number of digests too
+		if len(subject.Digest) > maxAllowedSubjectDigests {
+			return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
new file mode 100644
index 00000000..918bf545
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
@@ -0,0 +1,756 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	in_toto "github.com/in-toto/attestation/go/v1"
+	"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+const (
+	VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1"
+)
+
+type SignedEntityVerifier struct {
+	trustedMaterial root.TrustedMaterial
+	config          VerifierConfig
+}
+
+type VerifierConfig struct { // nolint: revive
+	// requireSignedTimestamps requires RFC3161 timestamps to verify
+	// short-lived certificates
+	requireSignedTimestamps bool
+	// signedTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps in a bundle
+	signedTimestampThreshold int
+	// requireIntegratedTimestamps requires log entry integrated timestamps to
+	// verify short-lived certificates
+	requireIntegratedTimestamps bool
+	// integratedTimeThreshold is the minimum number of log entry
+	// integrated timestamps in a bundle
+	integratedTimeThreshold int
+	// requireObserverTimestamps requires RFC3161 timestamps and/or log
+	// integrated timestamps to verify short-lived certificates
+	requireObserverTimestamps bool
+	// observerTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps and/or log integrated timestamps in a bundle
+	observerTimestampThreshold int
+	// requireTlogEntries requires log inclusion proofs in a bundle
+	requireTlogEntries bool
+	// tlogEntriesThreshold is the minimum number of verified inclusion
+	// proofs in a bundle
+	tlogEntriesThreshold int
+	// requireSCTs requires SCTs in Fulcio certificates
+	requireSCTs bool
+	// ctlogEntriesThreshold is the minimum number of verified SCTs in
+	// a Fulcio certificate
+	ctlogEntriesThreshold int
+	// useCurrentTime uses the current time rather than a provided signed
+	// or log timestamp.
Most workflows will not use this option + useCurrentTime bool +} + +type VerifierOption func(*VerifierConfig) error + +// NewSignedEntityVerifier creates a new SignedEntityVerifier. It takes a +// root.TrustedMaterial, which contains a set of trusted public keys and +// certificates, and a set of VerifierConfigurators, which set the config +// that determines the behaviour of the Verify function. +// +// VerifierConfig's set of options should match the properties of a given +// Sigstore deployment, i.e. whether to expect SCTs, Tlog entries, or signed +// timestamps. +func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*SignedEntityVerifier, error) { + var err error + c := VerifierConfig{} + + for _, opt := range options { + err = opt(&c) + if err != nil { + return nil, fmt.Errorf("failed to configure verifier: %w", err) + } + } + + err = c.Validate() + if err != nil { + return nil, err + } + + v := &SignedEntityVerifier{ + trustedMaterial: trustedMaterial, + config: c, + } + + return v, nil +} + +// WithSignedTimestamps configures the SignedEntityVerifier to expect RFC 3161 +// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's +// TimestampingAuthorities(), and, if it exists, use the resulting timestamp(s) +// to verify the Fulcio certificate. +func WithSignedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("signed timestamp threshold must be at least 1") + } + c.requireSignedTimestamps = true + c.signedTimestampThreshold = threshold + return nil + } +} + +// WithObserverTimestamps configures the SignedEntityVerifier to expect +// timestamps from either an RFC3161 timestamp authority or a log's +// SignedEntryTimestamp. These are verified using the TrustedMaterial's +// TimestampingAuthorities() or RekorLogs(), and used to verify +// the Fulcio certificate. +func WithObserverTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("observer timestamp threshold must be at least 1") + } + c.requireObserverTimestamps = true + c.observerTimestampThreshold = threshold + return nil + } +} + +// WithTransparencyLog configures the SignedEntityVerifier to expect +// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them +// using the TrustedMaterial's RekorLogs(). +func WithTransparencyLog(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("transparency log entry threshold must be at least 1") + } + c.requireTlogEntries = true + c.tlogEntriesThreshold = threshold + return nil + } +} + +// WithIntegratedTimestamps configures the SignedEntityVerifier to +// expect log entry integrated timestamps from either SignedEntryTimestamps +// or live log lookups. +func WithIntegratedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + c.requireIntegratedTimestamps = true + c.integratedTimeThreshold = threshold + return nil + } +} + +// WithSignedCertificateTimestamps configures the SignedEntityVerifier to +// expect the Fulcio certificate to have a SignedCertificateTimestamp, and +// verify it using the TrustedMaterial's CTLogAuthorities(). 
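Taken together, the constructor and options above compose into a short wiring sketch. The sketch assumes the public-good Sigstore deployment (thresholds of 1), and that `root.FetchTrustedRoot` and `bundle.LoadJSONFromPath` from sigstore-go's `root` and `bundle` packages behave as named; neither appears in this diff, and the bundle path is illustrative:

```go
package main

import (
	"fmt"

	"github.com/sigstore/sigstore-go/pkg/bundle"
	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore-go/pkg/verify"
)

func main() {
	// Assumption: FetchTrustedRoot pulls the public-good Sigstore trusted
	// root via TUF; private deployments build their own TrustedMaterial.
	trusted, err := root.FetchTrustedRoot()
	if err != nil {
		panic(err)
	}

	// Thresholds of 1 match the public-good deployment: one Rekor entry
	// and one observer timestamp.
	v, err := verify.NewSignedEntityVerifier(trusted,
		verify.WithTransparencyLog(1),
		verify.WithObserverTimestamps(1),
	)
	if err != nil {
		panic(err)
	}

	// Assumption: LoadJSONFromPath is sigstore-go's bundle loader and
	// bundles satisfy the SignedEntity interface; the path is hypothetical.
	b, err := bundle.LoadJSONFromPath("artifact.sigstore.json")
	if err != nil {
		panic(err)
	}

	// The unsafe policy options skip artifact and identity checks and are
	// used here only to keep the sketch short; real callers should pass
	// WithArtifact/WithArtifactDigest and WithCertificateIdentity.
	res, err := v.Verify(b, verify.NewPolicy(
		verify.WithoutArtifactUnsafe(),
		verify.WithoutIdentitiesUnsafe(),
	))
	if err != nil {
		panic(err)
	}
	fmt.Println("verified; observer timestamps:", len(res.VerifiedTimestamps))
}
```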
+func WithSignedCertificateTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("ctlog entry threshold must be at least 1") + } + c.requireSCTs = true + c.ctlogEntriesThreshold = threshold + return nil + } +} + +// WithCurrentTime configures the SignedEntityVerifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log. +// This option should not be enabled when verifying short-lived certificates, +// as an observer timestamp is needed. This option is useful primarily for +// private deployments with long-lived code signing certificates. +func WithCurrentTime() VerifierOption { + return func(c *VerifierConfig) error { + c.useCurrentTime = true + return nil + } +} + +func (c *VerifierConfig) Validate() error { + if !c.requireObserverTimestamps && !c.requireSignedTimestamps && !c.requireIntegratedTimestamps && !c.useCurrentTime { + return errors.New("when initializing a new SignedEntityVerifier, you must specify at least one of " + + "WithObserverTimestamps(), WithSignedTimestamps(), or WithIntegratedTimestamps()") + } + + return nil +} + +type VerificationResult struct { + MediaType string `json:"mediaType"` + Statement *in_toto.Statement `json:"statement,omitempty"` + Signature *SignatureVerificationResult `json:"signature,omitempty"` + VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"` + VerifiedIdentity *CertificateIdentity `json:"verifiedIdentity,omitempty"` +} + +type SignatureVerificationResult struct { + PublicKeyID *[]byte `json:"publicKeyId,omitempty"` + Certificate *certificate.Summary `json:"certificate,omitempty"` +} + +type TimestampVerificationResult struct { + Type string `json:"type"` + URI string `json:"uri"` + Timestamp time.Time `json:"timestamp"` +} + +func NewVerificationResult() *VerificationResult { + return &VerificationResult{ + MediaType: VerificationResultMediaType01, + } +} + +// MarshalJSON deals with protojson needed for the Statement. +// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged. +func (b *VerificationResult) MarshalJSON() ([]byte, error) { + statement, err := protojson.Marshal(b.Statement) + if err != nil { + return nil, err + } + // creating a type alias to avoid infinite recursion, as MarshalJSON is + // not copied into the alias. + type Alias VerificationResult + return json.Marshal(struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + Statement: statement, + }) +} + +func (b *VerificationResult) UnmarshalJSON(data []byte) error { + b.Statement = &in_toto.Statement{} + type Alias VerificationResult + aux := &struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + } + if err := json.Unmarshal(data, aux); err != nil { + return err + } + return protojson.Unmarshal(aux.Statement, b.Statement) +} + +type PolicyOption func(*PolicyConfig) error +type ArtifactPolicyOption func(*PolicyConfig) error + +// PolicyBuilder is responsible for building & validating a PolicyConfig +type PolicyBuilder struct { + artifactPolicy ArtifactPolicyOption + policyOptions []PolicyOption +} + +func (pc PolicyBuilder) Options() []PolicyOption { + arr := []PolicyOption{PolicyOption(pc.artifactPolicy)} + return append(arr, pc.policyOptions...) 
+}
+
+func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) {
+	var err error
+
+	policy := &PolicyConfig{}
+	for _, applyOption := range pc.Options() {
+		err = applyOption(policy)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := policy.Validate(); err != nil {
+		return nil, err
+	}
+
+	return policy, nil
+}
+
+type PolicyConfig struct {
+	weDoNotExpectAnArtifact bool
+	weDoNotExpectIdentities bool
+	weExpectSigningKey      bool
+	certificateIdentities   CertificateIdentities
+	verifyArtifact          bool
+	artifact                io.Reader
+	verifyArtifactDigest    bool
+	artifactDigest          []byte
+	artifactDigestAlgorithm string
+}
+
+func (p *PolicyConfig) Validate() error {
+	if p.WeExpectIdentities() && len(p.certificateIdentities) == 0 {
+		return errors.New("can't verify identities without providing at least one identity")
+	}
+
+	return nil
+}
+
+// WeExpectAnArtifact returns true if the Verify algorithm should perform
+// signature verification with an artifact provided by either the
+// WithArtifact or the WithArtifactDigest functions.
+//
+// By default, unless explicitly turned off, we should always expect to verify
+// a SignedEntity's signature using an artifact. Bools are initialized to false,
+// so this behaviour is therefore controlled by the weDoNotExpectAnArtifact
+// field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) WeExpectAnArtifact() bool {
+	return !p.weDoNotExpectAnArtifact
+}
+
+// WeExpectIdentities returns true if the Verify algorithm should check
+// whether the SignedEntity's certificate was created by one of the identities
+// provided by the WithCertificateIdentity function.
+//
+// By default, unless explicitly turned off, we should always expect to enforce
+// that a SignedEntity's certificate was created by an Identity we trust. Bools
+// are initialized to false, so this behaviour is therefore controlled by the
+// weDoNotExpectIdentities field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) WeExpectIdentities() bool {
+	return !p.weDoNotExpectIdentities
+}
+
+// WeExpectSigningKey returns true if we expect the SignedEntity to be signed
+// with a key and not a certificate.
+func (p *PolicyConfig) WeExpectSigningKey() bool {
+	return p.weExpectSigningKey
+}
+
+func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder {
+	return PolicyBuilder{artifactPolicy: artifactOpt, policyOptions: options}
+}
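A short sketch of building and validating policies with NewPolicy and the option functions defined in this file. The digest is computed from stand-in data, and the unsafe option appears only to exercise BuildConfig validation:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/sigstore/sigstore-go/pkg/verify"
)

func main() {
	sum := sha256.Sum256([]byte("hello world\n")) // stand-in artifact digest

	// Pin the artifact digest but skip identity checks: the "unsafe"
	// escape hatch, shown here only to exercise BuildConfig.
	pb := verify.NewPolicy(
		verify.WithArtifactDigest("sha256", sum[:]),
		verify.WithoutIdentitiesUnsafe(),
	)
	if _, err := pb.BuildConfig(); err != nil {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("digest policy OK")
	}

	// Expect a plain signing key instead of a Fulcio certificate.
	keyPB := verify.NewPolicy(
		verify.WithArtifactDigest("sha256", sum[:]),
		verify.WithKey(),
	)
	if _, err := keyPB.BuildConfig(); err != nil {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("key policy OK")
	}
}
```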
+
+// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any
+// checks on the identity that created the SignedEntity being verified.
+//
+// Do not use this option unless you know what you are doing!
+//
+// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of
+// exceptional circumstances, we should always enforce that the SignedEntity
+// being verified was signed by a trusted CertificateIdentity.
+//
+// For more information, consult WithCertificateIdentity.
+func WithoutIdentitiesUnsafe() PolicyOption {
+	return func(p *PolicyConfig) error {
+		if len(p.certificateIdentities) > 0 {
+			return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities")
+		}
+
+		p.weDoNotExpectIdentities = true
+		return nil
+	}
+}
+
+// WithCertificateIdentity allows the caller of Verify to enforce that the
+// SignedEntity being verified was signed by the given identity, as defined by
+// the Fulcio certificate embedded in the entity. If this policy is enabled,
+// but the SignedEntity does not have a certificate, verification will fail.
+//
+// Providing this function multiple times will concatenate the provided
+// CertificateIdentity to the list of identities being checked.
+//
+// If all of the provided CertificateIdentities fail to match the Fulcio
+// certificate, then verification will fail. If *any* CertificateIdentity
+// matches, then verification will succeed. Therefore, each CertificateIdentity
+// provided to this function must define a "sufficient" identity to trust.
+//
+// The CertificateIdentity struct allows callers to specify:
+// - The exact value, or Regexp, of the SubjectAlternativeName
+// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer
+//
+// For convenience, consult the NewShortCertificateIdentity function.
+func WithCertificateIdentity(identity CertificateIdentity) PolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.weDoNotExpectIdentities {
+			return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe")
+		}
+		if p.weExpectSigningKey {
+			return errors.New("can't use WithCertificateIdentity while using WithKey")
+		}
+
+		p.certificateIdentities = append(p.certificateIdentities, identity)
+		return nil
+	}
+}
+
+// WithKey allows the caller of Verify to require the SignedEntity being
+// verified was signed with a key and not a certificate.
+func WithKey() PolicyOption {
+	return func(p *PolicyConfig) error {
+		if len(p.certificateIdentities) > 0 {
+			return errors.New("can't use WithKey while using WithCertificateIdentity")
+		}
+
+		p.weExpectSigningKey = true
+		p.weDoNotExpectIdentities = true
+		return nil
+	}
+}
+
+// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether
+// the SignedEntity was created from, or references, an artifact.
+//
+// WithoutArtifactUnsafe can only be used with SignedEntities that contain a
+// DSSE envelope. If the SignedEntity has a MessageSignature, providing
+// this policy option will cause verification to always fail, since
+// MessageSignatures can only be verified in the presence of an Artifact or
+// artifact digest. See WithArtifact/WithArtifactDigest for more information.
+//
+// Do not use this function unless you know what you are doing!
+//
+// As the name implies, using WithoutArtifactUnsafe is not safe: outside of
+// exceptional circumstances, SignedEntities should always be verified with
+// an artifact.
+func WithoutArtifactUnsafe() ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.verifyArtifact || p.verifyArtifactDigest {
+			return errors.New("can't use WithoutArtifactUnsafe while using WithArtifact or WithArtifactDigest")
+		}
+
+		p.weDoNotExpectAnArtifact = true
+		return nil
+	}
+}
+
+// WithArtifact allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a given artifact.
+// +// If the SignedEntity contains a DSSE envelope, then the artifact digest is +// calculated from the given artifact, and compared to the digest in the +// envelope's statement. +func WithArtifact(artifact io.Reader) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if p.verifyArtifact || p.verifyArtifactDigest { + return errors.New("only one invocation of WithArtifact/WithArtifactDigest is allowed") + } + + if p.weDoNotExpectAnArtifact { + return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe") + } + + p.verifyArtifact = true + p.artifact = artifact + return nil + } +} + +// WithArtifactDigest allows the caller of Verify to enforce that the +// SignedEntity being verified was created for a given artifact digest. +// +// If the SignedEntity contains a MessageSignature that was signed using the +// ED25519 algorithm, then providing only an artifactDigest will fail; the +// whole artifact must be provided. Use WithArtifact instead. +// +// If the SignedEntity contains a DSSE envelope, then the artifact digest is +// compared to the digest in the envelope's statement. +func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption { + return func(p *PolicyConfig) error { + if p.verifyArtifact || p.verifyArtifactDigest { + return errors.New("only one invocation of WithArtifact/WithArtifactDigest is allowed") + } + + if p.weDoNotExpectAnArtifact { + return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe") + } + + p.verifyArtifactDigest = true + p.artifactDigestAlgorithm = algorithm + p.artifactDigest = artifactDigest + return nil + } +} + +// Verify checks the cryptographic integrity of a given SignedEntity according +// to the options configured in the NewSignedEntityVerifier. Its purpose is to +// determine whether the SignedEntity was created by a Sigstore deployment we +// trust, as defined by keys in our TrustedMaterial. +// +// If the SignedEntity contains a MessageSignature, then the artifact or its +// digest must be provided to the Verify function, as it is required to verify +// the signature. See WithArtifact and WithArtifactDigest for more details. +// +// If and only if verification is successful, Verify will return a +// VerificationResult struct whose contents' integrity have been verified. +// Verify may then verify the contents of the VerificationResults using supplied +// PolicyOptions. See WithCertificateIdentity for more details. +// +// Callers of this function SHOULD ALWAYS: +// - (if the signed entity has a certificate) verify that its Subject Alternate +// Name matches a trusted identity, and that its OID Issuer field matches an +// expected value +// - (if the signed entity has a dsse envelope) verify that the envelope's +// statement's subject matches the artifact being verified +func (v *SignedEntityVerifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) { + policy, err := pb.BuildConfig() + if err != nil { + return nil, fmt.Errorf("failed to build policy: %w", err) + } + + // Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh + // > ## Transparency Log Entry + verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity) + if err != nil { + return nil, fmt.Errorf("failed to verify log inclusion: %w", err) + } + + // > ## Establishing a Time for the Signature + // > First, establish a time for the signature. 
This timestamp is required to validate the certificate chain, so this step comes first. + verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps) + if err != nil { + return nil, fmt.Errorf("failed to verify timestamps: %w", err) + } + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch verification content: %w", err) + } + + var signedWithCertificate bool + var certSummary certificate.Summary + + // If the bundle was signed with a long-lived key, and does not have a Fulcio certificate, + // then skip the certificate verification steps + if leafCert := verificationContent.Certificate(); leafCert != nil { + if policy.WeExpectSigningKey() { + return nil, errors.New("expected key signature, not certificate") + } + + signedWithCertificate = true + + // Get the summary before modifying the cert extensions + certSummary, err = certificate.SummarizeCertificate(leafCert) + if err != nil { + return nil, fmt.Errorf("failed to summarize certificate: %w", err) + } + + // From spec: + // > ## Certificate + // > … + // > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails. + + // Go does not support the OtherName GeneralName SAN extension. If + // Fulcio issued the certificate with an OtherName SAN, it will be + // handled by SummarizeCertificate above, and it must be removed here + // or the X.509 verification will fail. + if len(leafCert.UnhandledCriticalExtensions) > 0 { + var unhandledExts []asn1.ObjectIdentifier + for _, oid := range leafCert.UnhandledCriticalExtensions { + if !oid.Equal(cryptoutils.SANOID) { + unhandledExts = append(unhandledExts, oid) + } + } + leafCert.UnhandledCriticalExtensions = unhandledExts + } + + var chains [][]*x509.Certificate + for _, verifiedTs := range verifiedTimestamps { + // verify the leaf certificate against the root + chains, err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify leaf certificate: %w", err) + } + } + + // From spec: + // > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log. + + if v.config.requireSCTs { + err = VerifySignedCertificateTimestamp(chains, v.config.ctlogEntriesThreshold, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err) + } + } + } + + // From spec: + // > ## Signature Verification + // > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain. 
+ + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch signature content: %w", err) + } + + if policy.WeExpectAnArtifact() { + switch { + case policy.verifyArtifact: + err = VerifySignatureWithArtifact(sigContent, verificationContent, v.trustedMaterial, policy.artifact) + case policy.verifyArtifactDigest: + err = VerifySignatureWithArtifactDigest(sigContent, verificationContent, v.trustedMaterial, policy.artifactDigest, policy.artifactDigestAlgorithm) + default: + // should never happen, but just in case: + err = errors.New("no artifact or artifact digest provided") + } + } else { + // verifying with artifact has been explicitly turned off, so just check + // the signature on the dsse envelope: + err = VerifySignature(sigContent, verificationContent, v.trustedMaterial) + } + + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %w", err) + } + + // Hooray! We've verified all of the entity's constituent parts! 🎉 🥳 + // Now we can construct the results object accordingly. + result := NewVerificationResult() + if signedWithCertificate { + result.Signature = &SignatureVerificationResult{ + Certificate: &certSummary, + } + } + + // SignatureContent can be either an Envelope or a MessageSignature. + // If it's an Envelope, let's pop the Statement for our results: + if envelope := sigContent.EnvelopeContent(); envelope != nil { + stmt, err := envelope.Statement() + if err != nil { + return nil, fmt.Errorf("failed to fetch envelope statement: %w", err) + } + + result.Statement = stmt + } + + result.VerifiedTimestamps = verifiedTimestamps + + // Now that the signed entity's crypto material has been verified, and the + // result struct has been constructed, we can optionally enforce some + // additional policies: + // -------------------- + + // From ## Certificate section, + // >The Verifier MUST then check the certificate against the verification policy. Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate. + if policy.WeExpectIdentities() { + if !signedWithCertificate { + // We got asked to verify identities, but the entity was not signed with + // a certificate. That's a problem! + return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate") + } + + if len(policy.certificateIdentities) == 0 { + return nil, errors.New("can't verify certificate identities: no identities provided") + } + + matchingCertID, err := policy.certificateIdentities.Verify(certSummary) + if err != nil { + return nil, fmt.Errorf("failed to verify certificate identity: %w", err) + } + + result.VerifiedIdentity = matchingCertID + } + + return result, nil +} + +// VerifyTransparencyLogInclusion verifies TlogEntries if expected. Optionally returns +// a list of verified timestamps from the log integrated timestamps when verifying +// with observer timestamps. 
+// TODO: Return a different verification result for logs specifically (also for #48)
+func (v *SignedEntityVerifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	if v.config.requireTlogEntries {
+		// log timestamps should be verified if WithIntegratedTimestamps or WithObserverTimestamps is used
+		verifiedTlogTimestamps, err := VerifyArtifactTransparencyLog(entity, v.trustedMaterial, v.config.tlogEntriesThreshold,
+			v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, vts := range verifiedTlogTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "Tlog", URI: vts.URI, Timestamp: vts.Time})
+		}
+	}
+
+	return verifiedTimestamps, nil
+}
+
+// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies
+// that timestamp thresholds are met with log entry integrated timestamps,
+// signed timestamps, or a combination of both. The returned timestamps
+// can be used to verify short-lived certificates.
+// logTimestamps may be populated with verified log entry integrated timestamps.
+// In order to be verifiable, a SignedEntity must have at least one verified
+// "observer timestamp".
+func (v *SignedEntityVerifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	// From spec:
+	// > … if verification or timestamp parsing fails, the Verifier MUST abort
+	if v.config.requireSignedTimestamps {
+		verifiedSignedTimestamps, err := VerifyTimestampAuthorityWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold)
+		if err != nil {
+			return nil, err
+		}
+		for _, vts := range verifiedSignedTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
+		}
+	}
+
+	if v.config.requireIntegratedTimestamps {
+		if len(logTimestamps) < v.config.integratedTimeThreshold {
+			return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold)
+		}
+		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+	}
+
+	if v.config.requireObserverTimestamps {
+		verifiedSignedTimestamps, err := VerifyTimestampAuthority(entity, v.trustedMaterial)
+		if err != nil {
+			return nil, err
+		}
+
+		// check threshold for both RFC3161 and log timestamps
+		tsCount := len(verifiedSignedTimestamps) + len(logTimestamps)
+		if tsCount < v.config.observerTimestampThreshold {
+			return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d",
+				tsCount, v.config.observerTimestampThreshold)
+		}
+
+		// append all timestamps
+		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+ for _, vts := range verifiedSignedTimestamps { + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time}) + } + } + + if v.config.useCurrentTime { + // use current time to verify certificate if no signed timestamps are provided + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()}) + } + + if len(verifiedTimestamps) == 0 { + return nil, fmt.Errorf("no valid observer timestamps found") + } + + return verifiedTimestamps, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go new file mode 100644 index 00000000..a52b0a17 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go @@ -0,0 +1,148 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "crypto" + "encoding/hex" + "errors" + "fmt" + + "github.com/sigstore/sigstore/pkg/signature" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" +) + +const maxAllowedTlogEntries = 32 + +// VerifyArtifactTransparencyLog verifies that the given entity has been logged +// in the transparency log and that the log entry is valid. +// +// The threshold parameter is the number of unique transparency log entries +// that must be verified. 
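Given the maxAllowedTlogEntries cap of 32 just above, the quadratic duplicate scan inside the function below is cheap in practice. For larger inputs the same invariant can be checked in linear time with a composite map key. A standalone sketch (`entryRef`, `hasDuplicates`, and the sample values are illustrative, not part of this package):

```go
package main

import "fmt"

// entryRef captures the identity of a log entry, assuming LogKeyID()
// returns a string and LogIndex() an int64, as used in the loop below.
type entryRef struct {
	keyID string
	index int64
}

// hasDuplicates is a map-based equivalent of the pairwise scan in
// VerifyArtifactTransparencyLog: same outcome, O(n) instead of O(n^2).
func hasDuplicates(entries []entryRef) bool {
	seen := make(map[entryRef]struct{}, len(entries))
	for _, e := range entries {
		if _, dup := seen[e]; dup {
			return true
		}
		seen[e] = struct{}{}
	}
	return false
}

func main() {
	entries := []entryRef{{"abc", 100}, {"abc", 101}, {"abc", 100}}
	fmt.Println(hasDuplicates(entries)) // true: the third entry repeats the first
}
```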
+func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive + entries, err := entity.TlogEntries() + if err != nil { + return nil, err + } + + // limit the number of tlog entries to prevent DoS + if len(entries) > maxAllowedTlogEntries { + return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries) + } + + // disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold + for i := 0; i < len(entries); i++ { + for j := i + 1; j < len(entries); j++ { + if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() { + return nil, errors.New("duplicate tlog entries found") + } + } + } + + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, err + } + + entitySignature := sigContent.Signature() + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, err + } + + verifiedTimestamps := []root.Timestamp{} + logEntriesVerified := 0 + + for _, entry := range entries { + err := tlog.ValidateEntry(entry) + if err != nil { + return nil, err + } + + rekorLogs := trustedMaterial.RekorLogs() + keyID := entry.LogKeyID() + hex64Key := hex.EncodeToString([]byte(keyID)) + tlogVerifier, ok := trustedMaterial.RekorLogs()[hex64Key] + if !ok { + // skip entries the trust root cannot verify + continue + } + + if !entry.HasInclusionPromise() && !entry.HasInclusionProof() { + return nil, fmt.Errorf("entry must contain an inclusion proof and/or promise") + } + if entry.HasInclusionPromise() { + err = tlog.VerifySET(entry, rekorLogs) + if err != nil { + // skip entries the trust root cannot verify + continue + } + if trustIntegratedTime { + verifiedTimestamps = append(verifiedTimestamps, root.Timestamp{Time: entry.IntegratedTime(), URI: tlogVerifier.BaseURL}) + } + } + if entry.HasInclusionProof() { + verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc) + if err != nil { + return nil, err + } + + err = tlog.VerifyInclusion(entry, *verifier) + if err != nil { + return nil, err + } + // DO NOT use timestamp with only an inclusion proof, because it is not signed metadata + } + + // Ensure entry signature matches signature from bundle + if !bytes.Equal(entry.Signature(), entitySignature) { + return nil, errors.New("transparency log signature does not match") + } + + // Ensure entry certificate matches bundle certificate + if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) { + return nil, errors.New("transparency log certificate does not match") + } + + // TODO: if you have access to artifact, check that it matches body subject + + // Check tlog entry time against bundle certificates + if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) { + return nil, errors.New("integrated time outside certificate validity") + } + + // successful log entry verification + logEntriesVerified++ + } + + if logEntriesVerified < logThreshold { + return nil, fmt.Errorf("not enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold) + } + + return verifiedTimestamps, nil +} + +func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) { + verifier, err := signature.LoadVerifier(publicKey, hashFunc) + if err != nil { + return nil, err + } + + return &verifier, nil +} diff --git 
a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go
new file mode 100644
index 00000000..92f82894
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go
@@ -0,0 +1,99 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/sigstore/sigstore-go/pkg/root"
+)
+
+const maxAllowedTimestamps = 32
+
+// VerifyTimestampAuthority verifies that the given entity has been timestamped
+// by a trusted timestamp authority and that the timestamp is valid.
+func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, error) { //nolint:revive
+	signedTimestamps, err := entity.Timestamps()
+	if err != nil {
+		return nil, err
+	}
+
+	// limit the number of timestamps to prevent DoS
+	if len(signedTimestamps) > maxAllowedTimestamps {
+		return nil, fmt.Errorf("too many signed timestamps: %d > %d", len(signedTimestamps), maxAllowedTimestamps)
+	}
+
+	// disallow duplicate timestamps, as a malicious actor could use duplicates to bypass the threshold
+	for i := 0; i < len(signedTimestamps); i++ {
+		for j := i + 1; j < len(signedTimestamps); j++ {
+			if bytes.Equal(signedTimestamps[i], signedTimestamps[j]) {
+				return nil, errors.New("duplicate timestamps found")
+			}
+		}
+	}
+
+	sigContent, err := entity.SignatureContent()
+	if err != nil {
+		return nil, err
+	}
+
+	signatureBytes := sigContent.Signature()
+
+	verifiedTimestamps := []*root.Timestamp{}
+	for _, timestamp := range signedTimestamps {
+		verifiedSignedTimestamp, err := verifySignedTimestamp(timestamp, signatureBytes, trustedMaterial)
+
+		// Timestamps from an unknown source are okay, but don't count as verified
+		if err != nil {
+			continue
+		}
+
+		verifiedTimestamps = append(verifiedTimestamps, verifiedSignedTimestamp)
+	}
+
+	return verifiedTimestamps, nil
+}
+
+// VerifyTimestampAuthorityWithThreshold verifies that the given entity has
+// been timestamped by a trusted timestamp authority and that the timestamp
+// is valid.
+//
+// The threshold parameter is the number of unique timestamps that must be
+// verified.
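The duplicate rejection above and the threshold check below enforce one invariant: a repeated RFC3161 timestamp must never count twice toward the threshold. A self-contained illustration (`meetsThreshold` is hypothetical; the vendored code rejects duplicates outright rather than deduplicating):

```go
package main

import "fmt"

// meetsThreshold counts unique raw signed timestamps against a threshold,
// under the simplifying assumption that every timestamp verifies.
func meetsThreshold(signedTimestamps [][]byte, threshold int) bool {
	unique := make(map[string]struct{}, len(signedTimestamps))
	for _, ts := range signedTimestamps {
		unique[string(ts)] = struct{}{}
	}
	return len(unique) >= threshold
}

func main() {
	ts := [][]byte{[]byte("tsr-a"), []byte("tsr-a"), []byte("tsr-b")}
	fmt.Println(meetsThreshold(ts, 2)) // true: two unique timestamps
	fmt.Println(meetsThreshold(ts, 3)) // false: the duplicate doesn't count
}
```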
+func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive + verifiedTimestamps, err := VerifyTimestampAuthority(entity, trustedMaterial) + if err != nil { + return nil, err + } + if len(verifiedTimestamps) < threshold { + return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d", len(verifiedTimestamps), threshold) + } + return verifiedTimestamps, nil +} + +func verifySignedTimestamp(signedTimestamp []byte, signatureBytes []byte, trustedMaterial root.TrustedMaterial) (*root.Timestamp, error) { + timestampAuthorities := trustedMaterial.TimestampingAuthorities() + + // Iterate through TSA certificate authorities to find one that verifies + for _, tsa := range timestampAuthorities { + ts, err := tsa.Verify(signedTimestamp, signatureBytes) + if err == nil { + return ts, nil + } + } + + return nil, errors.New("unable to verify signed timestamps") +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index a8b2805e..1e2fa031 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -20,7 +20,6 @@ import ( "crypto" "crypto/ecdsa" "crypto/ed25519" - "crypto/elliptic" "crypto/rsa" "crypto/sha1" // nolint:gosec "crypto/x509" @@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error { switch pub := first.(type) { case *rsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "rsa")) + return errors.New(genErrMsg(first, second, "rsa")) } case *ecdsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) + return errors.New(genErrMsg(first, second, "ecdsa")) } case ed25519.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) + return errors.New(genErrMsg(first, second, "ed25519")) } default: return errors.New("unsupported key type") @@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string { // ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. 
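The rewritten ValidatePubKey below delegates both RSA and ECDSA checks to goodkey, which now covers P-521 (previously special-cased in validateEcdsaKey). A quick exercise of the new behaviour, assuming this vendored module is importable; P-224 should be rejected, since it is absent from the allowed-keys policy:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	for _, curve := range []elliptic.Curve{elliptic.P256(), elliptic.P521(), elliptic.P224()} {
		priv, err := ecdsa.GenerateKey(curve, rand.Reader)
		if err != nil {
			panic(err)
		}
		// Expected: P-256 and P-521 pass (nil error); P-224 is rejected.
		err = cryptoutils.ValidatePubKey(priv.Public())
		fmt.Printf("%s: %v\n", curve.Params().Name, err)
	}
}
```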
func ValidatePubKey(pub crypto.PublicKey) error { + // goodkey policy enforces: + // * RSA + // * Size of key: 2048 <= size <= 4096, size % 8 = 0 + // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) + // * Small primes check for modulus + // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) + // * Key is easily factored with Fermat's factorization method + // * EC + // * Public key Q is not the identity element (Ø) + // * Public key Q's x and y are within [0, p-1] + // * Public key Q is on the curve + // * Public key Q's order matches the subgroups (nQ = Ø) + allowedKeys := &goodkey.AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + ECDSAP521: true, + } + cfg := &goodkey.Config{ + FermatRounds: 100, + AllowedKeys: allowedKeys, + } + p, err := goodkey.NewPolicy(cfg, nil) + if err != nil { + // Should not occur, only chances to return errors are if fermat rounds + // are <0 or when loading blocked/weak keys from disk (not used here) + return errors.New("unable to initialize key policy") + } + switch pk := pub.(type) { case *rsa.PublicKey: - // goodkey policy enforces: - // * Size of key: 2048 <= size <= 4096, size % 8 = 0 - // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) - // * Small primes check for modulus - // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - // * Key is easily factored with Fermat's factorization method - p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) - if err != nil { - // Should not occur, only chances to return errors are if fermat rounds - // are <0 or when loading blocked/weak keys from disk (not used here) - return errors.New("unable to initialize key policy") - } // ctx is unused return p.GoodKey(context.Background(), pub) case *ecdsa.PublicKey: - // Unable to use goodkey policy because P-521 curve is not supported - return validateEcdsaKey(pk) + // ctx is unused + return p.GoodKey(context.Background(), pub) case ed25519.PublicKey: return validateEd25519Key(pk) } return errors.New("unsupported public key type") } -// Enforce that the ECDSA key curve is one of: -// * NIST P-256 (secp256r1, prime256v1) -// * NIST P-384 -// * NIST P-521. -// Other EC curves, like secp256k1, are not supported by Go. -func validateEcdsaKey(pub *ecdsa.PublicKey) error { - switch pub.Curve { - case elliptic.P224(): - return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521") - case elliptic.P256(), elliptic.P384(), elliptic.P521(): - return nil - default: - return fmt.Errorf("unexpected ec curve") - } -} - // No validations currently, ED25519 supports only one key size. func validateEd25519Key(_ ed25519.PublicKey) error { return nil diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go new file mode 100644 index 00000000..0612d8f7 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -0,0 +1,206 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"fmt"
+
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+)
+
+// PublicKeyType represents the public key algorithm for a given signature algorithm.
+type PublicKeyType uint
+
+const (
+	// RSA public key
+	RSA PublicKeyType = iota
+	// ECDSA public key
+	ECDSA
+	// ED25519 public key
+	ED25519
+)
+
+// RSAKeySize represents the size of an RSA public key in bits.
+type RSAKeySize int
+
+type algorithmDetails struct {
+	// knownAlgorithm is the signature algorithm that the following details refer to.
+	knownAlgorithm v1.PublicKeyDetails
+
+	// keyType is the public key algorithm being used.
+	keyType PublicKeyType
+
+	// hashType is the hash algorithm being used.
+	hashType crypto.Hash
+
+	// extraKeyParams contains any extra parameters required to check a given public key against this entry.
+	//
+	// The underlying type of these parameters is dependent on the keyType.
+	// For example, ECDSA algorithms will store an elliptic curve here, whereas RSA keys will store the key size.
+	// Algorithms that don't require any extra parameters leave this set to nil.
+	extraKeyParams interface{}
+
+	// flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI
+	// arguments that are used for Sigstore services.
+	flagValue string
+}
+
+func (a algorithmDetails) GetRSAKeySize() (RSAKeySize, error) {
+	if a.keyType != RSA {
+		return 0, fmt.Errorf("unable to retrieve RSA key size for key type: %T", a.keyType)
+	}
+	rsaKeySize, ok := a.extraKeyParams.(RSAKeySize)
+	if !ok {
+		// This should be unreachable.
+		return 0, fmt.Errorf("unable to retrieve key size for RSA, malformed algorithm details?: %T", a.keyType)
+	}
+	return rsaKeySize, nil
+}
+
+func (a algorithmDetails) GetECDSACurve() (*elliptic.Curve, error) {
+	if a.keyType != ECDSA {
+		return nil, fmt.Errorf("unable to retrieve ECDSA curve for key type: %T", a.keyType)
+	}
+	ecdsaCurve, ok := a.extraKeyParams.(elliptic.Curve)
+	if !ok {
+		// This should be unreachable.
+ return nil, fmt.Errorf("unable to retrieve curve for ECDSA, malformed algorithm details?: %T", a.keyType) + } + return &ecdsaCurve, nil +} + +func (a algorithmDetails) checkKey(pubKey crypto.PublicKey) (bool, error) { + switch a.keyType { + case RSA: + rsaKey, ok := pubKey.(*rsa.PublicKey) + if !ok { + return false, nil + } + keySize, err := a.GetRSAKeySize() + if err != nil { + return false, err + } + return rsaKey.Size()*8 == int(keySize), nil + case ECDSA: + ecdsaKey, ok := pubKey.(*ecdsa.PublicKey) + if !ok { + return false, nil + } + curve, err := a.GetECDSACurve() + if err != nil { + return false, err + } + return ecdsaKey.Curve == *curve, nil + case ED25519: + _, ok := pubKey.(ed25519.PublicKey) + return ok, nil + } + return false, fmt.Errorf("unrecognized key type: %T", a.keyType) +} + +func (a algorithmDetails) checkHash(hashType crypto.Hash) bool { + return a.hashType == hashType +} + +// Note that deprecated options in PublicKeyDetails are not included in this +// list, including PKCS1v1.5 encoded RSA. Refer to the v1.PublicKeyDetails enum +// for more details. +var supportedAlgorithms = []algorithmDetails{ + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, RSA, crypto.SHA256, RSAKeySize(2048), "rsa-sign-pkcs1-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, RSA, crypto.SHA256, RSAKeySize(3072), "rsa-sign-pkcs1-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, RSAKeySize(4096), "rsa-sign-pss-4092-sha256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, ECDSA, crypto.SHA512, elliptic.P521(), "ecdsa-sha2-512-nistp521"}, + {v1.PublicKeyDetails_PKIX_ED25519, ED25519, crypto.Hash(0), nil, "ed25519"}, + {v1.PublicKeyDetails_PKIX_ED25519_PH, ED25519, crypto.SHA512, nil, "ed25519-ph"}, +} + +// AlgorithmRegistryConfig represents a set of permitted algorithms for a given Sigstore service or component. +// +// Individual services may wish to restrict what algorithms are allowed to a subset of what is covered in the algorithm +// registry (represented by v1.PublicKeyDetails). +type AlgorithmRegistryConfig struct { + permittedAlgorithms []algorithmDetails +} + +// getAlgorithmDetails retrieves a set of details for a given v1.PublicKeyDetails flag that allows users to +// introspect the public key algorithm, hash algorithm and more. +func getAlgorithmDetails(knownSignatureAlgorithm v1.PublicKeyDetails) (*algorithmDetails, error) { + for _, detail := range supportedAlgorithms { + if detail.knownAlgorithm == knownSignatureAlgorithm { + return &detail, nil + } + } + return nil, fmt.Errorf("could not find algorithm details for known signature algorithm: %s", knownSignatureAlgorithm) +} + +// NewAlgorithmRegistryConfig creates a new AlgorithmRegistryConfig for a set of permitted signature algorithms. 
+func NewAlgorithmRegistryConfig(algorithmConfig []v1.PublicKeyDetails) (*AlgorithmRegistryConfig, error) { + permittedAlgorithms := make([]algorithmDetails, 0, len(supportedAlgorithms)) + for _, algorithm := range algorithmConfig { + a, err := getAlgorithmDetails(algorithm) + if err != nil { + return nil, err + } + permittedAlgorithms = append(permittedAlgorithms, *a) + } + return &AlgorithmRegistryConfig{permittedAlgorithms: permittedAlgorithms}, nil +} + +// IsAlgorithmPermitted checks whether a given public key/hash algorithm combination is permitted by a registry config. +func (registryConfig AlgorithmRegistryConfig) IsAlgorithmPermitted(key crypto.PublicKey, hash crypto.Hash) (bool, error) { + for _, algorithm := range registryConfig.permittedAlgorithms { + keyMatch, err := algorithm.checkKey(key) + if err != nil { + return false, err + } + if keyMatch && algorithm.checkHash(hash) { + return true, nil + } + } + return false, nil +} + +// FormatSignatureAlgorithmFlag formats a v1.PublicKeyDetails to a string that conforms to the naming conventions +// of CLI arguments that are used for Sigstore services. +func FormatSignatureAlgorithmFlag(algorithm v1.PublicKeyDetails) (string, error) { + for _, a := range supportedAlgorithms { + if a.knownAlgorithm == algorithm { + return a.flagValue, nil + } + } + return "", fmt.Errorf("could not find matching flag for signature algorithm: %s", algorithm) +} + +// ParseSignatureAlgorithmFlag parses a string produced by FormatSignatureAlgorithmFlag and returns the corresponding +// v1.PublicKeyDetails value. +func ParseSignatureAlgorithmFlag(flag string) (v1.PublicKeyDetails, error) { + for _, a := range supportedAlgorithms { + if a.flagValue == flag { + return a.knownAlgorithm, nil + } + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, fmt.Errorf("could not find matching signature algorithm for flag: %s", flag) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/README.md b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/README.md new file mode 100644 index 00000000..17c2a3e8 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/README.md @@ -0,0 +1,109 @@ +# CLI Plugin + +This is a package and module for using custom KMS plugins as separate executables. +It is intended to be used by cosign, but you may use this in your own programs that import sigstore. + +## Design + +We follow [kubectl's style](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/#writing-kubectl-plugins) of plugins. Any language that can create CLI programs can be used to make a plugin. + +### Usage + +Plugins are separate programs on your system's PATH, named in the scheme `sigstore-kms-[name]`, like `sigstore-kms-my-hsm`. They can be invoked with cosign like `cosign [sub-command] --key "my-hsm://my-key-id" ... +` + +### Protocol + +The main program will invoke the program with these specifications: + +* stdin + * Data to be signed or verified. +* arg 1 + * A number identifying the version of this protocol. + * In the future we may change this protocol, either the encoding or the formatting of argument and return values. + Protocol changes will result in a major-version bump for the library. + When changing the protocol, new versions of sigstore will not maintain backwards compatibility with + previous protocol versions. 
If a plugin author wishes, they may branch their plugin program’s behaviour
+  to be compatible with multiple versions of the protocol, or multiple major versions of sigstore and cosign.
+* arg 2
+  * JSON of initialization options and method arguments.
+* stdout
+  * JSON of method return values.
+
+See [./common/interface.go](./common/interface.go) and [./common/interface_test.go](./common/interface_test.go) for the full JSON schema.
+
+The plugin program must first exit before sigstore begins parsing responses.
+
+#### Error Handling
+
+The plugin program’s stderr will be redirected to the main program’s stderr. This way, the main program may also see the plugin program’s debug messages.
+
+Plugin authors may return errors with `PluginResp.ErrorMessage`, but the plugin's exit status will be ignored.
+
+### Implementation
+
+Plugin authors must implement the `kms.SignerVerifier` interface methods in their chosen language. Each method will invoke your program once, and the response will be parsed from stdout.
+
+`PluginClient.CryptoSigner()` will return an object that is a wrapper around `PluginClient`, so plugin authors need not do a full implementation of `SignerVerifier()`.
+
+Exit status is ignored. Your program's stderr will be redirected to the main program, and errors you wish to return must be serialized in `PluginResp.ErrorMessage` in stdout.
+
+For authors using Go, we vend some helper functions to help you get started. See [handler](./handler/README.md).
+
+## Development
+
+Changes to the `SignerVerifier` interface are to be handled in [./signer.go's](./signer.go) `PluginClient` and [./handler/dispatch.go's](./handler/dispatch.go) `Dispatch()`.
+
+### Adding New Methods or Method Options
+
+Adding new methods or options is *not* necessarily a breaking change to the schemas, so we may consider these to be minor version increments, both to the protocol version and the sigstore version.
+
+### Removing Methods
+
+Removing methods or altering their signatures will break the schemas and will require major version increments, both to the protocol version and the sigstore version.
+
+### Example Plugin
+
+We have an example plugin in [test/cliplugin/localkms](../../../../test/cliplugin/localkms).
+
+1. Compile cosign and the plugin
+
+   ```shell
+   go build -C cosign/cmd/cosign -o `pwd`/cosign-cli
+   go build -C sigstore/test/cliplugin/localkms -o `pwd`/sigstore-kms-localkms
+   ```
+
+2. Sign some data
+
+   With our example, you need to first create the key.
+
+   ```shell
+   export PATH="$PATH:`pwd`"
+   cosign-cli generate-key-pair --kms localkms://`pwd`/key.pem
+   cat cosign.pub
+   ```
+
+   Sign some data.
+
+   ```shell
+   export PATH="$PATH:`pwd`"
+   echo "my-data" > blob.txt
+   cosign-cli sign-blob --tlog-upload=false --key localkms://`pwd`/key.pem blob.txt
+   ```
+
+### Testing
+
+Unit tests against an example plugin program are in [./signer_program_test.go](./signer_program_test.go).
+Compile the plugin and invoke unit tests with
+
+```shell
+make test-signer-program
+```
+
+Or invoke the unit tests with your own pre-compiled plugin program like
+
+```shell
+export PATH=$PATH:[folder containing plugin program]
+go test -C ./pkg/signature/kms/cliplugin -v -tags=signer_program ./...
-key-resource-id [my-kms]://[my key ref] +``` diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/client.go new file mode 100644 index 00000000..213a926c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/client.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cliplugin implements the plugin functionality. +package cliplugin + +import ( + "context" + "crypto" + "errors" + "fmt" + "os/exec" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/internal/signerverifier" +) + +const ( + // PluginBinaryPrefix is the prefix for all plugin binaries. e.g., sigstore-kms-my-hsm. + PluginBinaryPrefix = "sigstore-kms-" +) + +// ErrorInputKeyResourceID indicates a problem parsing the key resource id. +var ErrorInputKeyResourceID = errors.New("parsing input key resource id") + +// LoadSignerVerifier creates a PluginClient with these InitOptions. +// If the plugin executable does not exist, then it returns exec.ErrNotFound. +func LoadSignerVerifier(ctx context.Context, inputKeyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (signerverifier.SignerVerifier, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + executable, keyResourceID, err := getPluginExecutableAndKeyResourceID(inputKeyResourceID) + if err != nil { + return nil, err + } + if _, err := exec.LookPath(executable); err != nil { + return nil, err + } + initOptions := &common.InitOptions{ + ProtocolVersion: common.ProtocolVersion, + KeyResourceID: keyResourceID, + HashFunc: hashFunc, + RPCOptions: encoding.PackRPCOptions(opts), + } + if deadline, ok := ctx.Deadline(); ok { + initOptions.CtxDeadline = &deadline + } + pluginClient := newPluginClient(executable, initOptions, makeCmd) + return pluginClient, nil +} + +// getPluginExecutableAndKeyResourceID parses the inputKeyResourceID into the plugin executable and the actual keyResourceID. 
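The helper below maps the URI scheme to a plugin binary name and hands the remainder to the plugin. A standalone sketch of the same split (`splitKeyResourceID` is a stand-in for the unexported helper):

```go
package main

import (
	"fmt"
	"strings"
)

const pluginBinaryPrefix = "sigstore-kms-"

// splitKeyResourceID mirrors the parsing below: the scheme selects the
// plugin executable, the remainder is the key reference for the plugin.
func splitKeyResourceID(input string) (executable, keyRef string, err error) {
	parts := strings.SplitN(input, "://", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected [plugin name]://[key ref], got: %s", input)
	}
	return pluginBinaryPrefix + parts[0], parts[1], nil
}

func main() {
	exe, ref, err := splitKeyResourceID("localkms:///tmp/key.pem")
	fmt.Println(exe, ref, err) // sigstore-kms-localkms /tmp/key.pem <nil>
}
```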
+func getPluginExecutableAndKeyResourceID(inputKeyResourceID string) (string, string, error) { + parts := strings.SplitN(inputKeyResourceID, "://", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("%w: expected format: [plugin name]://[key ref], got: %s", ErrorInputKeyResourceID, inputKeyResourceID) + } + pluginName, keyResourceID := parts[0], parts[1] + executable := PluginBinaryPrefix + pluginName + return executable, keyResourceID, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/cmd.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/cmd.go new file mode 100644 index 00000000..f73c14f9 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/cmd.go @@ -0,0 +1,45 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cliplugin implements the plugin functionality. +package cliplugin + +import ( + "context" + "io" + "os/exec" +) + +// cmd is an interface for os/exec.Cmd. +type cmd interface { + Output() ([]byte, error) +} + +// makeCmdFunc is a type for a function that can create a cmd. +type makeCmdFunc func(ctx context.Context, stdin io.Reader, stderr io.Writer, name string, args ...string) cmd + +// makeCmd is an implementation of makeCmdFunc. +func makeCmd(ctx context.Context, stdin io.Reader, stderr io.Writer, name string, args ...string) cmd { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Stdin = stdin + cmd.Stderr = stderr + return cmd +} + +// cmdExitError is an interface for os/exec.ExitError. +type cmdExitError interface { + Error() string + ExitCode() int +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/interface.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/interface.go new file mode 100644 index 00000000..ea9487db --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/interface.go @@ -0,0 +1,147 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package common defines the JSON schema for plugin arguments and return values. +package common + +import ( + "crypto" + "time" +) + +const ( + // ProtocolVersion is the version of the schema and communication protocol for the plugin system. + // Breaking changes to the PluginClient and this schema necessarily mean major version bumps of + // this ProtocolVersion and the sigstore version. 
+ // Plugin authors may choose to be backwards compatible with older versions. + ProtocolVersion = "v1" + // DefaultAlgorithmMethodName is the MethodName for DefaultAlgorithm(). + DefaultAlgorithmMethodName = "defaultAlgorithm" + // SupportedAlgorithmsMethodName is the MethodName for SupportedAlgorithms(). + SupportedAlgorithmsMethodName = "supportedAlgorithms" + // CreateKeyMethodName is the MethodName for CreateKey(). + CreateKeyMethodName = "createKey" + // PublicKeyMethodName is the MethodName for PublicKey(). + PublicKeyMethodName = "publicKey" + // SignMessageMethodName is the MethodName for SignMessage(). + SignMessageMethodName = "signMessage" + // VerifySignatureMethodName is the MethodName for VerifySignature(). + VerifySignatureMethodName = "verifySignature" + // CryptoSigner is not to be added to the protocol. + // PluginClient.CryptoSigner() will instead return a wrapper around the plugin. +) + +// PluginArgs contains all the initialization and method arguments to be sent to the plugin as a CLI argument. +type PluginArgs struct { + InitOptions *InitOptions `json:"initOptions"` + *MethodArgs +} + +// InitOptions contains the initial arguments when calling cliplugin.LoadSignerVerifier(). +type InitOptions struct { + // CtxDeadline serializes to RFC 3339. See https://pkg.go.dev/time@go1.23.5#Time.MarshalJSON. e.g., 2025-04-01T02:47:00Z. + CtxDeadline *time.Time `json:"ctxDeadline,omitempty"` + ProtocolVersion string `json:"protocolVersion"` + KeyResourceID string `json:"keyResourceID"` + // HashFunc will serialize to ints according to https://pkg.go.dev/crypto@go1.23.5#Hash. e.g., crypto.SHA256 serializes to 5. + HashFunc crypto.Hash `json:"hashFunc"` + RPCOptions *RPCOptions `json:"rpcOptions"` +} + +// MethodArgs contains the method arguments. MethodName must be specified, +// while any one of the other fields describing method arguments must also be specified. +// Arguments that are io.Readers, like `message` in `SignMessage()`, will be sent over stdin. +type MethodArgs struct { + // MethodName specifies which method is intended to be called. + MethodName string `json:"methodName"` + DefaultAlgorithm *DefaultAlgorithmArgs `json:"defaultAlgorithm,omitempty"` + SupportedAlgorithms *SupportedAlgorithmsArgs `json:"supportedAlgorithms,omitempty"` + CreateKey *CreateKeyArgs `json:"createKey,omitempty"` + PublicKey *PublicKeyArgs `json:"publicKey,omitempty"` + SignMessage *SignMessageArgs `json:"signMessage,omitempty"` + VerifySignature *VerifySignatureArgs `json:"verifySignature,omitempty"` +} + +// PluginResp contains the serialized plugin method return values. +type PluginResp struct { + ErrorMessage string `json:"errorMessage,omitempty"` + DefaultAlgorithm *DefaultAlgorithmResp `json:"defaultAlgorithm,omitempty"` + SupportedAlgorithms *SupportedAlgorithmsResp `json:"supportedAlgorithms,omitempty"` + CreateKey *CreateKeyResp `json:"createKey,omitempty"` + PublicKey *PublicKeyResp `json:"publicKey,omitempty"` + SignMessage *SignMessageResp `json:"signMessage,omitempty"` + VerifySignature *VerifySignatureResp `json:"verifySignature,omitempty"` +} + +// DefaultAlgorithmArgs contains the serialized arguments for `DefaultAlgorithm()`. +type DefaultAlgorithmArgs struct{} + +// DefaultAlgorithmResp contains the serialized response for `DefaultAlgorithm()`. +type DefaultAlgorithmResp struct { + DefaultAlgorithm string `json:"defaultAlgorithm"` +} + +// SupportedAlgorithmsArgs contains the serialized arguments for `SupportedAlgorithms()`.
+type SupportedAlgorithmsArgs struct{} + +// SupportedAlgorithmsResp contains the serialized response for `SupportedAlgorithms()`. +type SupportedAlgorithmsResp struct { + SupportedAlgorithms []string `json:"supportedAlgorithms"` +} + +// CreateKeyArgs contains the serialized arguments for `CreateKey()`. +type CreateKeyArgs struct { + // CtxDeadline serializes to RFC 3339. See https://pkg.go.dev/time@go1.23.5#Time.MarshalJSON. e.g., 2025-04-01T02:47:00Z. + CtxDeadline *time.Time `json:"ctxDeadline,omitempty"` + Algorithm string `json:"algorithm"` +} + +// CreateKeyResp contains the serialized response for `CreateKey()`. +type CreateKeyResp struct { + // PublicKeyPEM is a base64 encoding of the Public Key PEM bytes. e.g., []byte("mypem") serializes to "bXlwZW0=". + PublicKeyPEM []byte `json:"publicKeyPEM"` +} + +// PublicKeyArgs contains the serialized arguments for `PublicKey()`. +type PublicKeyArgs struct { + PublicKeyOptions *PublicKeyOptions `json:"publicKeyOptions"` +} + +// PublicKeyResp contains the serialized response for `PublicKey()`. +type PublicKeyResp struct { + // PublicKeyPEM is a base64 encoding of the Public Key PEM bytes. e.g., []byte("mypem") serializes to "bXlwZW0=". + PublicKeyPEM []byte `json:"publicKeyPEM"` +} + +// SignMessageArgs contains the serialized arguments for `SignMessage()`. +type SignMessageArgs struct { + SignOptions *SignOptions `json:"signOptions"` +} + +// SignMessageResp contains the serialized response for `SignMessage()`. +type SignMessageResp struct { + // Signature is a base64 encoding of the signature bytes. e.g., []byte("any-signature") serializes to "YW55LXNpZ25hdHVyZQ==". + Signature []byte `json:"signature"` +} + +// VerifySignatureArgs contains the serialized arguments for `VerifySignature()`. +type VerifySignatureArgs struct { + // Signature is a base64 encoding of the signature bytes. e.g., []byte("any-signature") serializes to "YW55LXNpZ25hdHVyZQ==". + Signature []byte `json:"signature"` + VerifyOptions *VerifyOptions `json:"verifyOptions"` +} + +// VerifySignatureResp contains the serialized response for `VerifySignature()`. +type VerifySignatureResp struct{} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/options.go new file mode 100644 index 00000000..a8752824 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common/options.go @@ -0,0 +1,57 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package common defines the JSON schema for plugin arguments and return values. +package common + +import ( + "crypto" + "time" +) + +// PublicKeyOptions contains the values for signature.PublicKeyOption. +type PublicKeyOptions struct { + RPCOptions RPCOptions `json:"rpcOptions"` +} + +// SignOptions contains the values for signature.SignOption.
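+// A sketch of the serialized form, with hypothetical values (crypto.SHA256 +// serializes to the integer 5, per the MessageOptions comments below): +// +//	{"rpcOptions":{"keyVersion":"2"},"messageOptions":{"hashFunc":5}}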
+type SignOptions struct { + RPCOptions RPCOptions `json:"rpcOptions"` + MessageOptions MessageOptions `json:"messageOptions"` +} + +// VerifyOptions contains the values for signature.VerifyOption. +type VerifyOptions struct { + RPCOptions RPCOptions `json:"rpcOptions"` + MessageOptions MessageOptions `json:"messageOptions"` +} + +// RPCOptions contains the values for signature.RPCOption. +// We do not use RPCOptions.RPCAuth, to avoid sending secrets over the CLI to the plugin program. +// The plugin program should instead read secrets with env variables. +type RPCOptions struct { + // CtxDeadline serializes to RFC 3339. See https://pkg.go.dev/time@go1.23.5#Time.MarshalJSON. e.g., 2025-04-01T02:47:00Z. + CtxDeadline *time.Time `json:"ctxDeadline,omitempty"` + KeyVersion *string `json:"keyVersion,omitempty"` + RemoteVerification *bool `json:"remoteVerification,omitempty"` +} + +// MessageOptions contains the values for signature.MessageOption. +type MessageOptions struct { + // Digest is a base64 encoding of the digest bytes. e.g., []byte("anyDigest") serializes to "YW55RGlnZXN0". + Digest *[]byte `json:"digest,omitempty"` + // HashFunc will serialize to ints according to https://pkg.go.dev/crypto@go1.23.5#Hash. e.g., crypto.SHA256 serializes to 5. + HashFunc *crypto.Hash `json:"hashFunc,omitempty"` +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go new file mode 100644 index 00000000..09e4671a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding/options.go @@ -0,0 +1,197 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoding has helper functions for encoding and decoding some method arguments and return values. +package encoding + +// We have some type assertions that seem like they may panic, but this is just to satisfy +// golangci-lint's forcetypeassert linter. If they were to ever fail, unit tests would also fail. +// We know the asserted types are valid because otherwise we would have compiler failures. + +import ( + "context" + "crypto" + "time" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// PackRPCOptions extracts the properties of all opts into a struct ready for serializing.
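+// A minimal usage sketch (values hypothetical; options.WithKeyVersion is the same +// helper used by UnpackRPCOptions below): +// +//	packed := PackRPCOptions([]signature.RPCOption{options.WithKeyVersion("2")}) +//	// *packed.KeyVersion == "2"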
+func PackRPCOptions(opts []signature.RPCOption) *common.RPCOptions { + ctx := context.Background() + var keyVersion string + var remoteVerification bool + for _, opt := range opts { + opt.ApplyContext(&ctx) + opt.ApplyKeyVersion(&keyVersion) + opt.ApplyRemoteVerification(&remoteVerification) + } + var ctxDeadline *time.Time + if deadline, ok := ctx.Deadline(); ok { + ctxDeadline = &deadline + } + return &common.RPCOptions{ + CtxDeadline: ctxDeadline, + KeyVersion: &keyVersion, + RemoteVerification: &remoteVerification, + } +} + +// UnpackRPCOptions builds the []signature.RPCOption from common.RPCOptions. +func UnpackRPCOptions(commonOpts *common.RPCOptions) []signature.RPCOption { + opts := []signature.RPCOption{} + if commonOpts.CtxDeadline != nil { + // no need for this package to cancel the context early, + // and users may still check if the deadline is exceeded with ctx.Err(). + ctx, _ := context.WithDeadline(context.Background(), *commonOpts.CtxDeadline) //nolint:govet + opts = append(opts, options.WithContext(ctx)) + } + if commonOpts.KeyVersion != nil { + opts = append(opts, options.WithKeyVersion(*commonOpts.KeyVersion)) + } + if commonOpts.RemoteVerification != nil { + opts = append(opts, options.WithRemoteVerification(*commonOpts.RemoteVerification)) + } + return opts +} + +// PackMessageOptions extracts the properties of all opts into a struct ready for serializing. +func PackMessageOptions(opts []signature.MessageOption) *common.MessageOptions { + var digest []byte + var signerOpts crypto.SignerOpts + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&signerOpts) + } + var hashFunc *crypto.Hash + if signerOpts != nil { + hf := signerOpts.HashFunc() + hashFunc = &hf + } + return &common.MessageOptions{ + Digest: &digest, + HashFunc: hashFunc, + } +} + +// PackPublicKeyOptions extracts the properties of all opts into a struct ready for serializing. +func PackPublicKeyOptions(opts []signature.PublicKeyOption) *common.PublicKeyOptions { + rpcOpts := []signature.RPCOption{} + for _, opt := range opts { + rpcOpts = append(rpcOpts, opt) + } + return &common.PublicKeyOptions{ + RPCOptions: *PackRPCOptions(rpcOpts), + } +} + +// UnpackPublicKeyOptions builds the []signature.PublicKeyOption from common.PublicKeyOptions. +func UnpackPublicKeyOptions(commonOpts *common.PublicKeyOptions) []signature.PublicKeyOption { + opts := []signature.PublicKeyOption{} + for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + opt, ok := opt.(signature.PublicKeyOption) + if !ok { + panic("cannot assert as PublicKeyOption") + } + opts = append(opts, opt) + } + return opts +} + +// UnpackMessageOptions builds the []signature.MessageOption from common.MessageOptions.
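+// A round-trip sketch (digest bytes hypothetical): +// +//	commonOpts := PackMessageOptions([]signature.MessageOption{options.WithDigest([]byte("anyDigest"))}) +//	msgOpts := UnpackMessageOptions(commonOpts) // msgOpts carries the same digest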
+func UnpackMessageOptions(commonOpts *common.MessageOptions) []signature.MessageOption { + opts := []signature.MessageOption{} + if commonOpts.Digest != nil { + opts = append(opts, options.WithDigest(*commonOpts.Digest)) + } + if commonOpts.HashFunc != nil { + opts = append(opts, options.WithCryptoSignerOpts(*commonOpts.HashFunc)) + } + return opts +} + +// PackSignOptions extracts the properties of all opts into a struct ready for serializing. +func PackSignOptions(opts []signature.SignOption) *common.SignOptions { + rpcOpts := []signature.RPCOption{} + for _, opt := range opts { + rpcOpts = append(rpcOpts, opt) + } + messageOpts := []signature.MessageOption{} + for _, opt := range opts { + messageOpts = append(messageOpts, opt) + } + return &common.SignOptions{ + RPCOptions: *PackRPCOptions(rpcOpts), + MessageOptions: *PackMessageOptions(messageOpts), + } +} + +// UnpackSignOptions builds the []signature.SignOption from common.SignOptions. +func UnpackSignOptions(commonOpts *common.SignOptions) []signature.SignOption { + opts := []signature.SignOption{} + for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + opt, ok := opt.(signature.SignOption) + if !ok { + panic("cannot assert as SignOption") + } + opts = append(opts, opt) + } + for _, opt := range UnpackMessageOptions(&commonOpts.MessageOptions) { + opt, ok := opt.(signature.SignOption) + if !ok { + panic("cannot assert as SignOption") + } + opts = append(opts, opt) + } + return opts +} + +// PackVerifyOptions extracts the properties of all opts into a struct ready for serializing. +func PackVerifyOptions(opts []signature.VerifyOption) *common.VerifyOptions { + rpcOpts := []signature.RPCOption{} + for _, opt := range opts { + rpcOpts = append(rpcOpts, opt) + } + messageOpts := []signature.MessageOption{} + for _, opt := range opts { + messageOpts = append(messageOpts, opt) + } + return &common.VerifyOptions{ + RPCOptions: *PackRPCOptions(rpcOpts), + MessageOptions: *PackMessageOptions(messageOpts), + } +} + +// UnpackVerifyOptions builds the []signature.VerifyOption from common.VerifyOptions. +func UnpackVerifyOptions(commonOpts *common.VerifyOptions) []signature.VerifyOption { + opts := []signature.VerifyOption{} + for _, opt := range UnpackRPCOptions(&commonOpts.RPCOptions) { + opt, ok := opt.(signature.VerifyOption) + if !ok { + panic("cannot assert as VerifyOption") + } + opts = append(opts, opt) + } + for _, opt := range UnpackMessageOptions(&commonOpts.MessageOptions) { + opt, ok := opt.(signature.VerifyOption) + if !ok { + panic("cannot assert as VerifyOption") + } + opts = append(opts, opt) + } + return opts +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/internal/signerverifier/interface.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/internal/signerverifier/interface.go new file mode 100644 index 00000000..db17180d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/internal/signerverifier/interface.go @@ -0,0 +1,35 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package signerverifier contains the interface to be implemented by KMSs. +package signerverifier + +import ( + "context" + "crypto" + + "github.com/sigstore/sigstore/pkg/signature" +) + +// SignerVerifier creates and verifies digital signatures over a message using a KMS service. +// The contents must be kept in sync with kms.SignerVerifier, to continue satisfying that interface. +// We don't directly embed kms.SignerVerifier because then we would have an import cycle. +type SignerVerifier interface { + signature.SignerVerifier + CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) + CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) + SupportedAlgorithms() []string + DefaultAlgorithm() string +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/signer.go new file mode 100644 index 00000000..ef4bb4fe --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/signer.go @@ -0,0 +1,246 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cliplugin implements the plugin functionality. +package cliplugin + +import ( + "bytes" + "context" + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var ( + // ErrorExecutingPlugin indicates a problem executing the plugin program. + ErrorExecutingPlugin = errors.New("error executing plugin program") + // ErrorResponseParse indicates a problem parsing the plugin response. + ErrorResponseParse = errors.New("parsing plugin response") + // ErrorPluginReturnError indicates that the plugin returned a parseable error. + ErrorPluginReturnError = errors.New("plugin returned error") +) + +// PluginClient implements signerverifier.SignerVerifier with calls to our plugin program. +type PluginClient struct { + executable string + initOptions common.InitOptions + makeCmdFunc makeCmdFunc +} + +// newPluginClient creates a new PluginClient. +func newPluginClient(executable string, initOptions *common.InitOptions, makeCmd makeCmdFunc) *PluginClient { + pluginClient := &PluginClient{ + executable: executable, + initOptions: *initOptions, + makeCmdFunc: makeCmd, + } + return pluginClient +} + +// invokePlugin invokes the plugin program and parses its response.
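+// As a rough sketch (plugin name and JSON abbreviated, not a literal transcript), the +// resulting child-process invocation resembles: +// +//	sigstore-kms-my-hsm v1 '{"initOptions":{...},"methodName":"signMessage",...}' +// +// with any io.Reader argument, such as the message to be signed, streamed over stdin.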
+func (c PluginClient) invokePlugin(ctx context.Context, stdin io.Reader, methodArgs *common.MethodArgs) (*common.PluginResp, error) { + pluginArgs := &common.PluginArgs{ + InitOptions: &c.initOptions, + MethodArgs: methodArgs, + } + argsEnc, err := json.Marshal(pluginArgs) + if err != nil { + return nil, err + } + cmd := c.makeCmdFunc(ctx, stdin, os.Stderr, c.executable, common.ProtocolVersion, string(argsEnc)) + // We won't look at the program's non-zero exit code, but we will respect any other + // error, and cases when exec.ExitError.ExitCode() is 0 or -1: + // * (0) the program finished successfully or + // * (-1) there was some other problem not due to the program itself. + // The only debugging is to either parse the returned error in stdout, + // or for the user to examine the stderr logs. + // See https://pkg.go.dev/os#ProcessState.ExitCode. + stdout, err := cmd.Output() + var exitError cmdExitError + if err != nil && (!errors.As(err, &exitError) || exitError.ExitCode() < 1) { + return nil, fmt.Errorf("%w: %w", ErrorExecutingPlugin, err) + } + var resp common.PluginResp + if unmarshallErr := json.Unmarshal(stdout, &resp); unmarshallErr != nil { + return nil, fmt.Errorf("%w: %w", ErrorResponseParse, unmarshallErr) + } + if resp.ErrorMessage != "" { + return nil, fmt.Errorf("%w: %s", ErrorPluginReturnError, resp.ErrorMessage) + } + return &resp, nil +} + +// DefaultAlgorithm calls and returns the plugin's implementation of DefaultAlgorithm(). +func (c PluginClient) DefaultAlgorithm() string { + args := &common.MethodArgs{ + MethodName: common.DefaultAlgorithmMethodName, + DefaultAlgorithm: &common.DefaultAlgorithmArgs{}, + } + resp, err := c.invokePlugin(context.Background(), nil, args) + if err != nil { + log.Fatal(err) + } + return resp.DefaultAlgorithm.DefaultAlgorithm +} + +// SupportedAlgorithms calls and returns the plugin's implementation of SupportedAlgorithms(). +func (c PluginClient) SupportedAlgorithms() []string { + args := &common.MethodArgs{ + MethodName: common.SupportedAlgorithmsMethodName, + SupportedAlgorithms: &common.SupportedAlgorithmsArgs{}, + } + resp, err := c.invokePlugin(context.Background(), nil, args) + if err != nil { + log.Fatal(err) + } + return resp.SupportedAlgorithms.SupportedAlgorithms +} + +// CreateKey calls and returns the plugin's implementation of CreateKey(). +func (c PluginClient) CreateKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { + args := &common.MethodArgs{ + MethodName: common.CreateKeyMethodName, + CreateKey: &common.CreateKeyArgs{ + Algorithm: algorithm, + }, + } + if deadline, ok := ctx.Deadline(); ok { + args.CreateKey.CtxDeadline = &deadline + } + resp, err := c.invokePlugin(ctx, nil, args) + if err != nil { + return nil, err + } + return cryptoutils.UnmarshalPEMToPublicKey(resp.CreateKey.PublicKeyPEM) +} + +// PublicKey calls and returns the plugin's implementation of PublicKey(). +// If the opts contain a context, then it will be used with the Cmd.
+func (c PluginClient) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + args := &common.MethodArgs{ + MethodName: common.PublicKeyMethodName, + PublicKey: &common.PublicKeyArgs{ + PublicKeyOptions: encoding.PackPublicKeyOptions(opts), + }, + } + ctx := context.Background() + for _, opt := range opts { + opt.ApplyContext(&ctx) + } + resp, err := c.invokePlugin(ctx, nil, args) + if err != nil { + return nil, err + } + return cryptoutils.UnmarshalPEMToPublicKey(resp.PublicKey.PublicKeyPEM) +} + +// SignMessage calls and returns the plugin's implementation of SignMessage(). +// If the opts contain a context, then it will be used with the Cmd. +func (c PluginClient) SignMessage(message io.Reader, opts ...signature.SignOption) ([]byte, error) { + args := &common.MethodArgs{ + MethodName: common.SignMessageMethodName, + SignMessage: &common.SignMessageArgs{ + SignOptions: encoding.PackSignOptions(opts), + }, + } + ctx := context.Background() + for _, opt := range opts { + opt.ApplyContext(&ctx) + } + resp, err := c.invokePlugin(ctx, message, args) + if err != nil { + return nil, err + } + signature := resp.SignMessage.Signature + return signature, nil +} + +// VerifySignature calls and returns the plugin's implementation of VerifySignature(). +// If the opts contain a context, then it will be used with the Cmd. +func (c PluginClient) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error { + // signatures won't be larger than 1MB, so it's fine to read the entire content into memory. + signatureBytes, err := io.ReadAll(signature) + if err != nil { + return err + } + args := &common.MethodArgs{ + MethodName: common.VerifySignatureMethodName, + VerifySignature: &common.VerifySignatureArgs{ + Signature: signatureBytes, + VerifyOptions: encoding.PackVerifyOptions(opts), + }, + } + ctx := context.Background() + for _, opt := range opts { + opt.ApplyContext(&ctx) + } + _, err = c.invokePlugin(ctx, message, args) + return err +} + +// CryptoSigner is a wrapper around PluginClient. +type CryptoSigner struct { + client *PluginClient + ctx context.Context + errFunc func(error) +} + +// CryptoSigner returns a wrapper around PluginClient. +func (c PluginClient) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) { + if err := ctx.Err(); err != nil { + return nil, nil, err + } + return &CryptoSigner{ + client: &c, + ctx: ctx, + errFunc: errFunc, + }, c.initOptions.HashFunc, nil +} + +// Sign is a wrapper around PluginClient.SignMessage(). The first argument for a rand source is not used. +func (c CryptoSigner) Sign(_ io.Reader, digest []byte, cryptoSignerOpts crypto.SignerOpts) (sig []byte, err error) { + emptyMessage := bytes.NewReader([]byte("")) + opts := []signature.SignOption{ + options.WithCryptoSignerOpts(cryptoSignerOpts.HashFunc()), + options.WithDigest(digest), + // the client's initializing ctx should not be used in calls to its methods. + } + sig, err = c.client.SignMessage(emptyMessage, opts...) + if err != nil && c.errFunc != nil { + c.errFunc(err) + } + return sig, err +} + +// Public is a wrapper around PluginClient.PublicKey(). +func (c CryptoSigner) Public() crypto.PublicKey { + publicKey, err := c.client.PublicKey() + if err != nil && c.errFunc != nil { + c.errFunc(err) + // we don't panic here. 
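+ // The returned key may be nil on failure; callers learn of the error only + // through the errFunc callback above.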
+ } + return publicKey +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go index 7095eb10..7baf9504 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go @@ -19,10 +19,13 @@ package kms import ( "context" "crypto" + "errors" "fmt" + "os/exec" "strings" "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin" ) // ProviderNotFoundError indicates that no matching KMS provider was found @@ -48,15 +51,27 @@ func AddProvider(keyResourceID string, init ProviderInit) { var providersMap = map[string]ProviderInit{} // Get returns a KMS SignerVerifier for the given resource string and hash function. -// If no matching provider is found, Get returns a ProviderNotFoundError. It -// also returns an error if initializing the SignerVerifier fails. +// If no matching built-in provider is found, it will try to use the plugin system as a provider. +// It returns a ProviderNotFoundError in these situations: +// - keyResourceID doesn't match any of our hard-coded providers' schemas, +// - the plugin name and key ref cannot be parsed from the input keyResourceID, +// - the plugin program can't be found. +// It also returns an error if initializing the SignerVerifier fails. func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (SignerVerifier, error) { for ref, pi := range providersMap { if strings.HasPrefix(keyResourceID, ref) { - return pi(ctx, keyResourceID, hashFunc, opts...) + sv, err := pi(ctx, keyResourceID, hashFunc, opts...) + if err != nil { + return nil, err + } + return sv, nil } } - return nil, &ProviderNotFoundError{ref: keyResourceID} + sv, err := cliplugin.LoadSignerVerifier(ctx, keyResourceID, hashFunc, opts...) + if errors.Is(err, exec.ErrNotFound) || errors.Is(err, cliplugin.ErrorInputKeyResourceID) { + return nil, fmt.Errorf("%w: %w", &ProviderNotFoundError{ref: keyResourceID}, err) + } + return sv, err } // SupportedProviders returns list of initialized providers diff --git a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go index 3f21ec0a..d03973e7 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go @@ -71,6 +71,7 @@ var ( // getRemoteRoot is a var for testing. var getRemoteRoot = func() string { return DefaultRemoteRoot } +// Deprecated: Use https://pkg.go.dev/github.com/sigstore/sigstore-go/pkg/tuf type TUF struct { sync.Mutex client *client.Client @@ -452,37 +453,7 @@ func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFil func (t *TUF) updateClient() (data.TargetFiles, error) { targets, err := t.client.Update() if err != nil { - // Get some extra information for debugging. What was the state of the top-level - // metadata on the remote? - status := struct { - Mirror string `json:"mirror"` - Metadata map[string]MetadataStatus `json:"metadata"` - }{ - Mirror: t.Mirror(), - Metadata: make(map[string]MetadataStatus), - } - for _, md := range []string{"root.json", "targets.json", "snapshot.json", "timestamp.json"} { - r, _, err := t.remote.GetMeta(md) - if err != nil { - // May be missing, or failed download.
- continue - } - defer r.Close() - b, err := io.ReadAll(r) - if err != nil { - continue - } - mdStatus, err := getMetadataStatus(b) - if err != nil { - continue - } - status.Metadata[md] = *mdStatus - } - b, innerErr := json.MarshalIndent(status, "", "\t") - if innerErr != nil { - return nil, innerErr - } - return nil, fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b)) + return nil, fmt.Errorf("error updating to TUF remote mirror: %w", err) } // Success! Cache new metadata, if needed. if noCache() { diff --git a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go index d47f57f1..82bebdd2 100644 --- a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go +++ b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go @@ -250,7 +250,7 @@ func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error { return fmt.Errorf("error parsing hashed message: %w", err) } - if opts.Roots == nil || len(opts.Roots) == 0 { + if len(opts.Roots) == 0 { return fmt.Errorf("no root certificates provided for verifying the certificate chain") } diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index d49bbf83..cd9c0488 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -18,6 +18,14 @@ import ( var errNegativeNotAllowed = errors.New("unable to cast negative value") +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + // ToTimeE casts an interface to a time.Time type. func ToTimeE(i interface{}) (tim time.Time, err error) { return ToTimeInDefaultLocationE(i, time.UTC) @@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return - case json.Number: + case float64EProvider: var v float64 v, err = s.Float64() d = time.Duration(v) return + case float64Provider: + d = time.Duration(s.Float64()) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -174,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case float64Provider: + return s.Float64(), nil case bool: if s { return 1, nil @@ -230,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case float64Provider: + return float32(s.Float64()), nil case bool: if s { return 1, nil @@ -917,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} { return nil } - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + errorType := reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() v := reflect.ValueOf(a) for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { @@ -987,7 +1002,7 @@ func 
ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an interface to a map[string]string type. func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} + m := map[string]string{} switch v := i.(type) { case map[string]string: @@ -1017,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an interface to a map[string][]string type. func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} + m := map[string][]string{} switch v := i.(type) { case map[string][]string: @@ -1081,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an interface to a map[string]bool type. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} + m := map[string]bool{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1106,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an interface to a map[string]interface{} type. func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} + m := map[string]interface{}{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1126,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) { // ToStringMapIntE casts an interface to a map[string]int{} type. func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} + m := map[string]int{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) } @@ -1167,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) { // ToStringMapInt64E casts an interface to a map[string]int64{} type. func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} + m := map[string]int64{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) } @@ -1404,38 +1419,35 @@ func (f timeFormat) hasTimezone() bool { return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone } -var ( - timeFormats = []timeFormat{ - // Keep common formats at the top. 
- {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) +var timeFormats = []timeFormat{ + // Keep common formats at the top. + {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, +} func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { if d, e = time.Parse(format.format, s); e == nil { diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24..2c8f4808 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! 
deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 6444f4b7..71757151 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,5 @@ -![cobra logo](assets/CobraMain.png) + +![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) Cobra is a library for creating powerful modern CLI applications. @@ -105,7 +106,7 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). +For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e05..b3e2dadf 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -39,7 +35,7 @@ var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) // This function can be called multiple times before and/or after completions are added to // the array. Each time this function is called with the same array, the new // ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { +func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion { return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) } @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly.
- activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a..ed1e70ce 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a531518..f4d198cb 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 1cce5c32..d2397aa3 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -146,7 +146,7 @@ __%[1]s_process_completion_results() { if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering - local fullFilter filter filteringCmd + local fullFilter="" filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. 
@@ -177,20 +177,71 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish + __%[1]s_handle_activeHelp +} + +__%[1]s_handle_activeHelp() { + # Print the activeHelp statements if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" + if [ -z $COMP_TYPE ]; then + # Bash v3 does not set the COMP_TYPE variable. + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + __%[1]s_reprint_commandLine + return fi + + # Only print ActiveHelp on the second TAB press + if [ $COMP_TYPE -eq 63 ]; then + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + if ((${#COMPREPLY[*]} == 0)); then + # When there are no completion choices from the program, file completion + # may kick in if the program has not disabled it; in such a case, we want + # to know if any files will match what the user typed, so that we know if + # there will be completions presented, so that we know how to handle ActiveHelp. + # To find out, we actually trigger the file completion ourselves; + # the call to _filedir will fill COMPREPLY if files match. + if (((directive & shellCompDirectiveNoFileComp) == 0)); then + __%[1]s_debug "Listing files" + _filedir + fi + fi + + if ((${#COMPREPLY[*]} != 0)); then + # If there are completion choices to be shown, print a delimiter. + # Re-printing the command-line will automatically be done + # by the shell when it prints the completion choices. + printf -- "--" + else + # When there are no completion choices at all, we need + # to re-print the command-line since the shell will + # not be doing it itself. + __%[1]s_reprint_commandLine + fi + elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then + # For completion type: menu-complete/menu-complete-backward and insert-completions + # the completions are immediately inserted into the command-line, so we first + # print the activeHelp message and reprint the command-line since the shell won't. + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + __%[1]s_reprint_commandLine + fi + fi +} + +__%[1]s_reprint_commandLine() { + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" fi } @@ -201,6 +252,8 @@ __%[1]s_extract_activeHelp() { local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do + [[ -z $comp ]] && continue + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" @@ -223,16 +276,21 @@ __%[1]s_handle_completion_types() { # If the user requested inserting one completion at a time, or all # completions at once on the command-line we must remove the descriptions. 
# https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 + + local tab=$'\t' + + # Strip any description and escape the completion to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}") + + # Only consider the completions that match + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so we need to escape all completions again since they will + # all be inserted on the command-line. + IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}") ;; *) @@ -243,11 +301,25 @@ __%[1]s_handle_completion_types() { } __%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp + local tab=$'\t' + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 # Short circuit to optimize if we don't have descriptions if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + # First, escape the completions to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}") + # Only consider the completions that match what the user typed + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so, if there is only a single completion, we need to + # escape it again because it will be inserted on the command-line. If there are multiple + # completions, we don't want to escape them because they will be printed in a list + # and we don't want to show escape characters in that list. + if (( ${#COMPREPLY[@]} == 1 )); then + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}") + fi return 0 fi @@ -256,23 +328,39 @@ __%[1]s_handle_standard_completion_case() { # Look for the longest completion so that we can format things nicely while IFS='' read -r compline; do [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} + + # Before checking if the completion matches what the user typed, + # we need to strip any description and escape the completion to handle special + # characters because those escape characters are part of what the user typed. + # Don't call "printf" in a sub-shell because it will be much slower + # since we are in a loop. + printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}") + + # Only consider the completions that match [[ $comp == "$cur"* ]] || continue + + # The completion matches. Add it to the list of full completions including + # its description. We don't escape the completion because it may get printed + # in a list if there is more than one, and we don't want to show escape characters + # in that list. + COMPREPLY+=("$compline") + + # Strip any description before checking the length, and again, don't escape + # the completion because this length is only used when printing the completions + # in a list and we don't want to show escape characters in that list.
+ comp=${compline%%%%$tab*} if ((${#comp}>longest)); then longest=${#comp} fi done < <(printf "%%s\n" "${completions[@]}") - # If there is a single completion left, remove the description text + # If there is a single completion left, remove the description text and escape any special characters if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}") + __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}" + else + # Format the descriptions __%[1]s_format_comp_descriptions $longest fi } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce..d9cd2414 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -176,12 +176,16 @@ func rpad(s string, padding int) string { return fmt.Sprintf(formattedString, s) } -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) +func tmpl(text string) *tmplFunc { + return &tmplFunc{ + tmpl: text, + fn: func(w io.Writer, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) + }, + } } // ld compares two strings and returns the levenshtein distance between them. @@ -193,8 +197,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c13..dbb2c298 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -33,6 +33,9 @@ import ( const ( FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" + + helpFlagName = "help" + helpCommandName = "help" ) // FParseErrWhitelist configures Flag parse errors to be ignored @@ -80,11 +83,11 @@ type Command struct { Example string // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string + ValidArgs []Completion // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + ValidArgsFunction CompletionFunc // Expected arguments Args PositionalArgs @@ -154,8 +157,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. 
parentsPflags *flag.FlagSet @@ -166,12 +171,12 @@ type Command struct { // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. - usageTemplate string + usageTemplate *tmplFunc // flagErrorFunc is func defined by user and it's called when the parsing of // flags returns an error. flagErrorFunc func(*Command, error) error // helpTemplate is help template defined by user. - helpTemplate string + helpTemplate *tmplFunc // helpFunc is help func defined by user. helpFunc func(*Command, []string) // helpCommand is command with usage 'help'. If it's not defined by user, @@ -184,7 +189,7 @@ type Command struct { completionCommandGroupID string // versionTemplate is the version template defined by user. - versionTemplate string + versionTemplate *tmplFunc // errPrefix is the error message prefix defined by user. errPrefix string @@ -279,6 +284,7 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// // Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { c.outWriter = output @@ -310,7 +316,11 @@ func (c *Command) SetUsageFunc(f func(*Command) error) { // SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s + if s == "" { + c.usageTemplate = nil + return + } + c.usageTemplate = tmpl(s) } // SetFlagErrorFunc sets a function to generate an error when flag parsing @@ -346,12 +356,20 @@ func (c *Command) SetCompletionCommandGroupID(groupID string) { // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s + if s == "" { + c.helpTemplate = nil + return + } + c.helpTemplate = tmpl(s) } // SetVersionTemplate sets version template to be used. Application can use it to set custom template. func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s + if s == "" { + c.versionTemplate = nil + return + } + c.versionTemplate = tmpl(s) } // SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. @@ -432,7 +450,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } return func(c *Command) error { c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + fn := c.getUsageTemplateFunc() + err := fn(c.OutOrStderr(), c) if err != nil { c.PrintErrln(err) } @@ -440,6 +459,19 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } +// getUsageTemplateFunc returns the usage template function for the command +// going up the command tree if necessary. +func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error { + if c.usageTemplate != nil { + return c.usageTemplate.fn + } + + if c.HasParent() { + return c.parent.getUsageTemplateFunc() + } + return defaultUsageFunc +} + // Usage puts out the usage for the command. // Used when a user provides invalid input. // Can be defined by user by overriding UsageFunc. 
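The command.go hunks above replace the stored usage/help/version template strings with precompiled `*tmplFunc` values that are resolved through a walk up the command tree (`getUsageTemplateFunc`), and `SetUsageTemplate("")` now reverts to the built-in default instead of storing an empty template. A minimal sketch of the inherited-template behavior, assuming the cobra version this diff produces; the `app`/`sub` command names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	sub := &cobra.Command{Use: "sub", Run: func(*cobra.Command, []string) {}}
	root.AddCommand(sub)

	// A template set on the root is found by children through the
	// parent walk added in this diff; passing "" would reset to the default.
	root.SetUsageTemplate("Usage for {{.CommandPath}}\n")

	fmt.Print(sub.UsageString()) // -> Usage for app sub
}
```

The same parent walk applies to the help and version templates, falling back to `defaultUsageFunc`/`defaultHelpFunc`/`defaultVersionFunc` when no user template is set anywhere in the tree.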
@@ -458,15 +490,30 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + fn := c.getHelpTemplateFunc() // The help should be sent to stdout // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + err := fn(c.OutOrStdout(), c) if err != nil { c.PrintErrln(err) } } } +// getHelpTemplateFunc returns the help template function for the command +// going up the command tree if necessary. +func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error { + if c.helpTemplate != nil { + return c.helpTemplate.fn + } + + if c.HasParent() { + return c.parent.getHelpTemplateFunc() + } + + return defaultHelpFunc +} + // Help puts out the help for the command. // Used when a user calls help [command]. // Can be defined by user by overriding HelpFunc. @@ -541,71 +588,55 @@ func (c *Command) NamePadding() int { } // UsageTemplate returns usage template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate + if c.usageTemplate != nil { + return c.usageTemplate.tmpl } if c.HasParent() { return c.parent.UsageTemplate() } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` + return defaultUsageTemplate } // HelpTemplate return help template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate + if c.helpTemplate != nil { + return c.helpTemplate.tmpl } if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + return defaultHelpTemplate } // VersionTemplate return version template for the command. +// This function is kept for backwards-compatibility reasons. 
func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate + if c.versionTemplate != nil { + return c.versionTemplate.tmpl } if c.HasParent() { return c.parent.VersionTemplate() } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` + return defaultVersionTemplate +} + +// getVersionTemplateFunc returns the version template function for the command +// going up the command tree if necessary. +func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error { + if c.versionTemplate != nil { + return c.versionTemplate.fn + } + + if c.HasParent() { + return c.parent.getVersionTemplateFunc() + } + return defaultVersionFunc } // ErrPrefix return error message prefix for the command @@ -706,7 +737,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) return ret @@ -754,14 +785,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +904,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -892,7 +923,7 @@ func (c *Command) execute(a []string) (err error) { // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") + helpVal, err := c.Flags().GetBool(helpFlagName) if err != nil { // should be impossible to get here as we always declare a help // flag in InitDefaultHelpFlag() @@ -912,7 +943,8 @@ func (c *Command) execute(a []string) (err error) { return err } if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + fn := c.getVersionTemplateFunc() + err := fn(c.OutOrStdout(), c) if err != nil { c.Println(err) } @@ -1066,12 +1098,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() args := c.args @@ -1080,9 +1106,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for shell completion + // initialize the __complete command to be used for shell completion c.initCompleteCmd(args) + // initialize the default completion command + c.InitDefaultCompletionCmd(args...) 
+ + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -1185,15 +1218,16 @@ func (c *Command) checkCommandGroups() { // If c already has help flag, it will do nothing. func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { + if c.Flags().Lookup(helpFlagName) == nil { usage := "help for " - if c.Name() == "" { + name := c.DisplayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + c.Flags().BoolP(helpFlagName, "h", false, usage) + _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1212,7 +1246,7 @@ func (c *Command) InitDefaultVersionFlag() { if c.Name() == "" { usage += "this command" } else { - usage += c.Name() + usage += c.DisplayName() } if c.Flags().ShorthandLookup("v") == nil { c.Flags().BoolP("version", "v", false, usage) @@ -1236,9 +1270,9 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string +Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { + var completions []Completion cmd, _, e := c.Root().Find(args) if e != nil { return nil, ShellCompDirectiveNoFileComp @@ -1250,7 +1284,7 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, for _, subCmd := range cmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } } } @@ -1427,6 +1461,12 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.DisplayName() +} + +// DisplayName returns the name to display in help text. Returns command Name() +// If CommandDisplayNameAnnoation is not set +func (c *Command) DisplayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1476,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1493,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. 
-// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1682,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1693,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1707,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1735,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1761,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1769,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. 
func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1782,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1901,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1883,3 +1927,141 @@ func commandNameMatches(s string, t string) bool { return s == t } + +// tmplFunc holds a template and a function that will execute said template. +type tmplFunc struct { + tmpl string + fn func(io.Writer, interface{}) error +} + +var defaultUsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + +// defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync. 
+func defaultUsageFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + fmt.Fprint(w, "Usage:") + if c.Runnable() { + fmt.Fprintf(w, "\n %s", c.UseLine()) + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n %s [command]", c.CommandPath()) + } + if len(c.Aliases) > 0 { + fmt.Fprintf(w, "\n\nAliases:\n") + fmt.Fprintf(w, " %s", c.NameAndAliases()) + } + if c.HasExample() { + fmt.Fprintf(w, "\n\nExamples:\n") + fmt.Fprintf(w, "%s", c.Example) + } + if c.HasAvailableSubCommands() { + cmds := c.Commands() + if len(c.Groups()) == 0 { + fmt.Fprintf(w, "\n\nAvailable Commands:") + for _, subcmd := range cmds { + if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } else { + for _, group := range c.Groups() { + fmt.Fprintf(w, "\n\n%s", group.Title) + for _, subcmd := range cmds { + if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + if !c.AllChildCommandsHaveGroup() { + fmt.Fprintf(w, "\n\nAdditional Commands:") + for _, subcmd := range cmds { + if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + } + } + if c.HasAvailableLocalFlags() { + fmt.Fprintf(w, "\n\nFlags:\n") + fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages())) + } + if c.HasAvailableInheritedFlags() { + fmt.Fprintf(w, "\n\nGlobal Flags:\n") + fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) + } + if c.HasHelpSubCommands() { + fmt.Fprintf(w, "\n\nAdditional help topcis:") + for _, subcmd := range c.Commands() { + if subcmd.IsAdditionalHelpTopicCommand() { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) + } + } + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath()) + } + fmt.Fprintln(w) + return nil +} + +var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + +// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync. +func defaultHelpFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + usage := c.Long + if usage == "" { + usage = c.Short + } + usage = trimRightSpace(usage) + if usage != "" { + fmt.Fprintln(w, usage) + fmt.Fprintln(w) + } + if c.Runnable() || c.HasSubCommands() { + fmt.Fprint(w, c.UsageString()) + } + return nil +} + +var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` + +// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. 
+func defaultVersionFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + _, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version) + return err +} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b20..a1752f76 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -33,7 +35,7 @@ const ( ) // Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} +var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{} // lock for reading and writing from flagCompletionFunctions var flagCompletionMutex = &sync.RWMutex{} @@ -115,22 +117,50 @@ type CompletionOptions struct { HiddenDefaultCmd bool } +// Completion is a string that can be used for completions +// +// two formats are supported: +// - the completion choice +// - the completion choice with a textual description (separated by a TAB). +// +// [CompletionWithDesc] can be used to create a completion string with a textual description. +// +// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. +type Completion = string + +// CompletionFunc is a function that provides completion results. +type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) + +// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format. +func CompletionWithDesc(choice string, description string) Completion { + return choice + "\t" + description +} + // NoFileCompletions can be used to disable file completion for commands that should // not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method satisfies [CompletionFunc]. +// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return nil, ShellCompDirectiveNoFileComp } // FixedCompletions can be used to create a completion function which always // returns the same results. -func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { +// +// This method returns a function that satisfies [CompletionFunc] +// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction]. +func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc { + return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { return choices, directive } } // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { +// +// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions], +// or you can define your own. 
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error { flag := c.Flag(flagName) if flag == nil { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) @@ -146,7 +176,7 @@ } // GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. -func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { +func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) { flag := c.Flag(flagName) if flag == nil { return nil, false @@ -211,24 +241,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +272,14 @@ // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -263,7 +298,15 @@ } } -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { +// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect +// flags that accept multiple values and therefore can provide completion +// multiple times. +type SliceValue interface { + // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string +} + +func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) { // The last argument, which is not completely typed by the user, // should not be part of the list of arguments toComplete := args[len(args)-1] @@ -291,7 +334,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -321,7 +364,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } realArgCount := finalCmd.Flags().NArg() @@ -333,14 +376,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } // Look for the --help or --version flags. If they are present, // there should be no further completions. if helpOrVersionFlagPresent(finalCmd) { - return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil } // We only remove the flags from the arguments if DisableFlagParsing is not set. @@ -369,11 +412,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil } // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil } } - var completions []string + var completions []Completion var directive ShellCompDirective // Enforce flag groups before doing flag completions @@ -392,10 +435,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // If we have not found any required flags, only then can we show regular flags if len(completions) == 0 { doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || + _, acceptsMultiple := flag.Value.(SliceValue) + acceptsMultiple = acceptsMultiple || strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + strings.Contains(flag.Value.Type(), "Array") || + strings.HasPrefix(flag.Value.Type(), "stringTo") + + if !flag.Changed || acceptsMultiple { + // If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo) // we suggest it as a completion completions = append(completions, getFlagNameCompletions(flag, toComplete)...) 
} @@ -455,7 +502,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } directive = ShellCompDirectiveNoFileComp } @@ -500,7 +547,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + var completionFn CompletionFunc if flag != nil && flagCompletion { flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] @@ -511,7 +558,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if completionFn != nil { // Go custom completion defined for this flag or command. // Call the registered completion function to get the completions. - var comps []string + var comps []Completion comps, directive = completionFn(finalCmd, finalArgs, toComplete) completions = append(completions, comps...) } @@ -524,23 +571,23 @@ func helpOrVersionFlagPresent(cmd *Command) bool { len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { return true } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil && len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { return true } return false } -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion { if nonCompletableFlag(flag) { - return []string{} + return []Completion{} } - var completions []string + var completions []Completion flagName := "--" + flag.Name if strings.HasPrefix(flagName, toComplete) { // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // Why suggest both long forms: --flag and --flag= ? // This forces the user to *always* have to type either an = or a space after the flag name. 
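The completions.go hunks above thread the new `Completion` and `CompletionFunc` names and the `CompletionWithDesc` helper through the completion engine. A small usage sketch under the APIs this diff introduces; the `serve` command, the `--format` flag, and its choices are invented for illustration:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "serve", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("format", "table", "output format")

	// A completion callback using the new named type; CompletionWithDesc
	// produces the TAB-delimited "choice\tdescription" form.
	var formatComps cobra.CompletionFunc = func(c *cobra.Command, args []string, toComplete string) ([]cobra.Completion, cobra.ShellCompDirective) {
		return []cobra.Completion{
			cobra.CompletionWithDesc("json", "machine-readable output"),
			cobra.CompletionWithDesc("table", "human-readable output"),
		}, cobra.ShellCompDirectiveNoFileComp
	}
	if err := cmd.RegisterFlagCompletionFunc("format", formatComps); err != nil {
		panic(err)
	}
}
```

Because `Completion` and `CompletionFunc` are declared as type aliases (`=`), completion callbacks written against the old `[]string`-based signature continue to compile unchanged.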
@@ -552,20 +599,20 @@ func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { // if len(flag.NoOptDefVal) == 0 { // // Flag requires a value, so it can be suffixed with = // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // } } flagName = "-" + flag.Shorthand if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) } return completions } -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string +func completeRequireFlags(finalCmd *Command, toComplete string) []Completion { + var completions []Completion doCompleteRequiredFlags := func(flag *pflag.Flag) { if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { @@ -680,8 +727,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // 1- the feature has been explicitly disabled by the program, // 2- c has no subcommands (to avoid creating one), // 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { +func (c *Command) InitDefaultCompletionCmd(args ...string) { + if c.CompletionOptions.DisableDefaultCmd { return } @@ -694,6 +741,16 @@ func (c *Command) InitDefaultCompletionCmd() { haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + // Special case to know if there are sub-commands or not. + hasSubCommands := false + for _, cmd := range c.commands { + if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName { + // We found a real sub-command (not 'help' or '__complete') + hasSubCommands = true + break + } + } + completionCmd := &Command{ Use: compCmdName, Short: "Generate the autocompletion script for the specified shell", @@ -707,6 +764,22 @@ See each sub-command's help for details on how to use the generated script. } c.AddCommand(completionCmd) + if !hasSubCommands { + // If the 'completion' command will be the only sub-command, + // we only create it if it is actually being called. + // This avoids breaking programs that would suddenly find themselves with + // a subcommand, which would prevent them from accepting arguments. + // We also create the 'completion' command if the user is triggering + // shell completion for it (prog __complete completion '') + subCmd, cmdArgs, err := c.Find(args) + if err != nil || subCmd.Name() != compCmdName && + !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + // The completion command is not being called or being completed so we remove it. + c.RemoveCommand(completionCmd) + return + } + } + out := c.OutOrStdout() noDesc := c.CompletionOptions.DisableDescriptions shortDesc := "Generate the autocompletion script for %s" @@ -899,3 +972,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. 
+const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f..560612fd 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -37,7 +37,7 @@ if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -70,7 +70,7 @@ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 55195193..746dcb92 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 +28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = 
strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { @@ -162,7 +162,10 @@ filter __%[1]s_escapeStringWithSpecialChars { if (-Not $Description) { $Description = " " } - @{Name="$Name";Description="$Description"} + New-Object -TypeName PSCustomObject -Property @{ + Name = "$Name" + Description = "$Description" + } } @@ -240,7 +243,12 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Only one completion left" # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } else { # Add the proper number of spaces to align the descriptions @@ -255,7 +263,12 @@ filter __%[1]s_escapeStringWithSpecialChars { $Description = " ($($comp.Description))" } - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + $CompletionText = "$($comp.Name)$Description" + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } @@ -264,7 +277,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # insert space after value # MenuComplete will automatically show the ToolTip of # the highlighted value at the bottom of the suggestions. - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } # TabCompleteNext and in case we get something unknown @@ -272,7 +291,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. 
# Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig similarity index 69% rename from vendor/github.com/fsnotify/fsnotify/.editorconfig rename to vendor/github.com/spf13/pflag/.editorconfig index fad89585..4492e9f9 100644 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ b/vendor/github.com/spf13/pflag/.editorconfig @@ -1,12 +1,12 @@ root = true -[*.go] -indent_style = tab +[*] +charset = utf-8 +end_of_line = lf indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] indent_style = space -indent_size = 2 insert_final_newline = true trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml new file mode 100644 index 00000000..b274f248 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.golangci.yaml @@ -0,0 +1,4 @@ +linters: + disable-all: true + enable: + - nolintlint diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 24a5036e..7c058de3 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -160,7 +160,7 @@ type FlagSet struct { args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor + output io.Writer // nil means stderr; use Output() accessor interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName @@ -255,13 +255,20 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName { return n(f, name) } -func (f *FlagSet) out() io.Writer { +// Output returns the destination for usage and error messages. os.Stderr is returned if +// output was not set or was set to nil. +func (f *FlagSet) Output() io.Writer { if f.output == nil { return os.Stderr } return f.output } +// Name returns the name of the flag set. +func (f *FlagSet) Name() string { + return f.name +} + // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { @@ -358,7 +365,7 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag { } if len(name) > 1 { msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } c := name[0] @@ -482,7 +489,7 @@ func (f *FlagSet) Set(name, value string) error { } if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } return nil } @@ -523,7 +530,7 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. 
func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) + fmt.Fprint(f.Output(), usages) } // defaultIsZeroValue returns true if the default value for this flag represents @@ -758,7 +765,7 @@ func PrintDefaults() { // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) f.PrintDefaults() } @@ -844,7 +851,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { _, alreadyThere := f.formal[normalizedFlagName] if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) + fmt.Fprintln(f.Output(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { @@ -860,7 +867,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { } if len(flag.Shorthand) > 1 { msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } if f.shorthands == nil { @@ -870,7 +877,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { used, alreadyThere := f.shorthands[c] if alreadyThere { msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } f.shorthands[c] = flag @@ -909,7 +916,7 @@ func VarP(value Value, name, shorthand, usage string) { func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) + fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -1060,7 +1067,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) } err = fn(flag, value) diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 3d414ba6..06b8bcb5 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -16,6 +16,9 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { + if s == "" { + return nil + } ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go new file mode 100644 index 00000000..6b541aa8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipNetSlice Value +type ipNetSliceValue struct { + value *[]net.IPNet + changed bool +} + +func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue { + ipnsv := new(ipNetSliceValue) + ipnsv.value = p + *ipnsv.value = val + return ipnsv +} + +// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag. +// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended. 
+func (s *ipNetSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IPNet, 0, len(ipNetStrSlice)) + for _, ipNetStr := range ipNetStrSlice { + _, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr)) + if err != nil { + return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr) + } + out = append(out, *n) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipNetSliceValue) Type() string { + return "ipNetSlice" +} + +// String defines a "native" format for this net.IPNet slice flag value. +func (s *ipNetSliceValue) String() string { + + ipNetStrSlice := make([]string, len(*s.value)) + for i, n := range *s.value { + ipNetStrSlice[i] = n.String() + } + + out, _ := writeAsCSV(ipNetStrSlice) + return "[" + out + "]" +} + +func ipNetSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Emtpy string would cause a slice with one (empty) entry + if len(val) == 0 { + return []net.IPNet{}, nil + } + ss := strings.Split(val, ",") + out := make([]net.IPNet, len(ss)) + for i, sval := range ss { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err != nil { + return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval) + } + out[i] = *n + } + return out, nil +} + +// GetIPNetSlice returns the []net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) { + val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv) + if err != nil { + return []net.IPNet{}, err + } + return val.([]net.IPNet), nil +} + +// IPNetSliceVar defines a ipNetSlice flag with specified name, default value, and usage string. +// The argument p points to a []net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { + f.VarP(newIPNetSliceValue(value, p), name, "", usage) +} + +// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { + f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) +} + +// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string. +// The argument p points to a []net.IPNet variable in which to store the value of the flag. +func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { + CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage) +} + +// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { + CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) +} + +// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of a []net.IPNet variable that stores the value of that flag. 
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { + p := []net.IPNet{} + f.IPNetSliceVarP(&p, name, "", value, usage) + return &p +} + +// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { + p := []net.IPNet{} + f.IPNetSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of the flag. +func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { + return CommandLine.IPNetSliceP(name, "", value, usage) +} + +// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. +func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { + return CommandLine.IPNetSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index 4894af81..d1ff0a96 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -31,11 +31,7 @@ func (s *stringArrayValue) Append(val string) error { func (s *stringArrayValue) Replace(val []string) error { out := make([]string, len(val)) for i, d := range val { - var err error out[i] = d - if err != nil { - return err - } } *s.value = out return nil diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index b96180b3..3fc7d84f 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -11,7 +11,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -39,7 +39,7 @@ Many Go projects are built using Viper including: go get github.com/spf13/viper ``` -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. ## What is Viper? @@ -420,7 +420,7 @@ flags, or environment variables. Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. 
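For concreteness, here is a minimal hedged sketch of the remote K/V flow the README section above describes, now backed by the sagikazarmark/crypt fork. The etcd endpoint and key path are illustrative; the blank import is what registers viper's remote config support.

```go
// Hedged sketch of viper's remote-provider flow; endpoint and key path are
// illustrative values, not taken from this patch.
package main

import (
	"fmt"

	"github.com/spf13/viper"
	_ "github.com/spf13/viper/remote" // registers the crypt-backed remote support
)

func main() {
	// Multiple hosts may be given as a ";"-separated endpoint list.
	if err := viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001;http://127.0.0.1:4002", "/config/hugo.json"); err != nil {
		panic(err)
	}
	viper.SetConfigType("json") // remote payloads need an explicit format
	if err := viper.ReadRemoteConfig(); err != nil {
		panic(err)
	}
	fmt.Println(viper.GetString("title")) // read a key from the fetched config
}
```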
@@ -432,7 +432,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index c4e36c68..b68993d4 100644 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -15,10 +15,10 @@ cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: ``` As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. -Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). +Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). The solution is easy: switch to using Go Modules. -Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 78da5109..3840614f 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -8,11 +8,11 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1687972261, - "narHash": "sha256-+mxvZfwMVoaZYETmuQWqTi/7T9UKoAE+WpdSQkOVJ2g=", + "lastModified": 1707817777, + "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", "owner": "cachix", "repo": "devenv", - "rev": "e85df562088573305e55906eaa964341f8cb0d9f", + "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", "type": "github" }, "original": { @@ -42,11 +42,11 @@ "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1687762428, - "narHash": "sha256-DIf7mi45PKo+s8dOYF+UlXHzE0Wl/+k3tXUyAoAnoGE=", + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "37dd7bb15791c86d55c5121740a1887ab55ee836", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", "type": "github" }, "original": { @@ -56,12 +56,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -151,11 +154,11 @@ "nixpkgs-lib": { "locked": { "dir": "lib", - "lastModified": 1685564631, - "narHash": "sha256-8ywr3AkblY4++3lIVxmrWZFzac7+f32ZEhH/A8pNscI=", + "lastModified": 1706550542, + "narHash": 
"sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "4f53efe34b3a8877ac923b9350c874e3dcd5dc0a", + "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", "type": "github" }, "original": { @@ -184,27 +187,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1678872516, - "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1687886075, - "narHash": "sha256-PeayJDDDy+uw1Ats4moZnRdL1OFuZm1Tj+KiHlD67+o=", + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a565059a348422af5af9026b5174dc5c0dcefdae", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", "type": "github" }, "original": { @@ -229,11 +232,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1686050334, - "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", + "lastModified": 1704725188, + "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", + "rev": "ea96f0c05924341c551a797aaba8126334c505d2", "type": "github" }, "original": { @@ -248,6 +251,21 @@ "flake-parts": "flake-parts", "nixpkgs": "nixpkgs_2" } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 9b26c3fc..0230668c 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,6 +20,7 @@ default = { languages = { go.enable = true; + go.package = pkgs.go_1_22; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 20eb4da1..da68d994 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -624,7 +624,7 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to // "myapp". -// Secure Remote Providers are implemented with github.com/bketelsen/crypt. +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -1791,12 +1791,6 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. 
-func unmarshalReader(in io.Reader, c map[string]any) error { - return v.unmarshalReader(in, c) -} - func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE new file mode 100644 index 00000000..85541be2 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 The Update Framework Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE new file mode 100644 index 00000000..09005219 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE @@ -0,0 +1,9 @@ +Copyright 2024 The Update Framework Authors + +Apache 2.0 License +Copyright 2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (/). + +SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go new file mode 100644 index 00000000..cd8cc98a --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package config + +import ( + "net/url" + "os" + + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +type UpdaterConfig struct { + // TUF configuration + MaxRootRotations int64 + MaxDelegations int + RootMaxLength int64 + TimestampMaxLength int64 + SnapshotMaxLength int64 + TargetsMaxLength int64 + // Updater configuration + Fetcher fetcher.Fetcher + LocalTrustedRoot []byte + LocalMetadataDir string + LocalTargetsDir string + RemoteMetadataURL string + RemoteTargetsURL string + DisableLocalCache bool + PrefixTargetsWithHash bool + // UnsafeLocalMode only uses the metadata as written on disk + // if the metadata is incomplete, calling updater.Refresh will fail + UnsafeLocalMode bool +} + +// New creates a new UpdaterConfig instance used by the Updater to +// store configuration +func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) { + // Default URL for target files - /targets + targetsURL, err := url.JoinPath(remoteURL, "targets") + if err != nil { + return nil, err + } + + return &UpdaterConfig{ + // TUF configuration + MaxRootRotations: 256, + MaxDelegations: 32, + RootMaxLength: 512000, // bytes + TimestampMaxLength: 16384, // bytes + SnapshotMaxLength: 2000000, // bytes + TargetsMaxLength: 5000000, // bytes + // Updater configuration + Fetcher: &fetcher.DefaultFetcher{}, // use the default built-in download fetcher + LocalTrustedRoot: rootBytes, // trusted root.json + RemoteMetadataURL: remoteURL, // URL of where the TUF metadata is + RemoteTargetsURL: targetsURL, // URL of where the target files should be downloaded from + DisableLocalCache: false, // enable local caching of trusted metadata + PrefixTargetsWithHash: true, // use hash-prefixed target files with consistent snapshots + UnsafeLocalMode: false, + }, nil +} + +func (cfg *UpdaterConfig) EnsurePathsExist() error { + if cfg.DisableLocalCache { + return nil + } + + for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go new file mode 100644 index 00000000..3bd8e5ea --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go @@ -0,0 +1,246 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "fmt" +) + +// Define TUF error types used inside the new modern implementation. +// The names chosen for TUF error types should start in 'Err' except where +// there is a good reason not to, and provide that reason in those cases. + +// Repository errors + +// ErrRepository - an error with a repository's state, such as a missing file. 
+// It covers all exceptions that come from the repository side when +// looking from the perspective of users of metadata API or client +type ErrRepository struct { + Msg string +} + +func (e *ErrRepository) Error() string { + return fmt.Sprintf("repository error: %s", e.Msg) +} + +func (e *ErrRepository) Is(target error) bool { + _, ok := target.(*ErrRepository) + return ok +} + +// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures +type ErrUnsignedMetadata struct { + Msg string +} + +func (e *ErrUnsignedMetadata) Error() string { + return fmt.Sprintf("unsigned metadata error: %s", e.Msg) +} + +// ErrUnsignedMetadata is a subset of ErrRepository +func (e *ErrUnsignedMetadata) Is(target error) bool { + if _, ok := target.(*ErrUnsignedMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrBadVersionNumber - An error for metadata that contains an invalid version number +type ErrBadVersionNumber struct { + Msg string +} + +func (e *ErrBadVersionNumber) Error() string { + return fmt.Sprintf("bad version number error: %s", e.Msg) +} + +// ErrBadVersionNumber is a subset of ErrRepository +func (e *ErrBadVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrEqualVersionNumber - An error for metadata containing a previously verified version number +type ErrEqualVersionNumber struct { + Msg string +} + +func (e *ErrEqualVersionNumber) Error() string { + return fmt.Sprintf("equal version number error: %s", e.Msg) +} + +// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber +func (e *ErrEqualVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrEqualVersionNumber); ok { + return true + } + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired +type ErrExpiredMetadata struct { + Msg string +} + +func (e *ErrExpiredMetadata) Error() string { + return fmt.Sprintf("expired metadata error: %s", e.Msg) +} + +// ErrExpiredMetadata is a subset of ErrRepository +func (e *ErrExpiredMetadata) Is(target error) bool { + if _, ok := target.(*ErrExpiredMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object +type ErrLengthOrHashMismatch struct { + Msg string +} + +func (e *ErrLengthOrHashMismatch) Error() string { + return fmt.Sprintf("length/hash verification error: %s", e.Msg) +} + +// ErrLengthOrHashMismatch is a subset of ErrRepository +func (e *ErrLengthOrHashMismatch) Is(target error) bool { + if _, ok := target.(*ErrLengthOrHashMismatch); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// Download errors + +// ErrDownload - An error occurred while attempting to download a file +type ErrDownload struct { + Msg string +} + +func (e *ErrDownload) Error() string { + return fmt.Sprintf("download error: %s", e.Msg) +} + +func (e *ErrDownload) Is(target error) bool { + _, ok := target.(*ErrDownload) + return ok +} + +// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file +type ErrDownloadLengthMismatch struct { 
+ Msg string +} + +func (e *ErrDownloadLengthMismatch) Error() string { + return fmt.Sprintf("download length mismatch error: %s", e.Msg) +} + +// ErrDownloadLengthMismatch is a subset of ErrDownload +func (e *ErrDownloadLengthMismatch) Is(target error) bool { + if _, ok := target.(*ErrDownloadLengthMismatch); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors +type ErrDownloadHTTP struct { + StatusCode int + URL string +} + +func (e *ErrDownloadHTTP) Error() string { + return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode) +} + +// ErrDownloadHTTP is a subset of ErrDownload +func (e *ErrDownloadHTTP) Is(target error) bool { + if _, ok := target.(*ErrDownloadHTTP); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ValueError +type ErrValue struct { + Msg string +} + +func (e *ErrValue) Error() string { + return fmt.Sprintf("value error: %s", e.Msg) +} + +func (e *ErrValue) Is(err error) bool { + _, ok := err.(*ErrValue) + return ok +} + +// TypeError +type ErrType struct { + Msg string +} + +func (e *ErrType) Error() string { + return fmt.Sprintf("type error: %s", e.Msg) +} + +func (e *ErrType) Is(err error) bool { + _, ok := err.(*ErrType) + return ok +} + +// RuntimeError +type ErrRuntime struct { + Msg string +} + +func (e *ErrRuntime) Error() string { + return fmt.Sprintf("runtime error: %s", e.Msg) +} + +func (e *ErrRuntime) Is(err error) bool { + _, ok := err.(*ErrRuntime) + return ok +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go new file mode 100644 index 00000000..dfe2bc14 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go @@ -0,0 +1,93 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package fetcher + +import ( + "fmt" + "io" + "net/http" + "strconv" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// Fetcher interface +type Fetcher interface { + DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error) +} + +// DefaultFetcher implements Fetcher +type DefaultFetcher struct { + httpUserAgent string +} + +func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) { + d.httpUserAgent = httpUserAgent +} + +// DownloadFile downloads a file from urlPath, errors out if it failed, +// its length is larger than maxLength or the timeout is reached. +func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error) { + client := &http.Client{Timeout: timeout} + req, err := http.NewRequest("GET", urlPath, nil) + if err != nil { + return nil, err + } + // Use in case of multiple sessions. 
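A brief aside before the rest of `DownloadFile` below: the error types just defined in errors.go carry custom `Is` methods so that `errors.Is` treats specific failures as members of broader categories. A hedged usage sketch exercising both the fetcher and that error hierarchy (URL and limits are illustrative values):

```go
// Hedged usage sketch, not part of this patch: DefaultFetcher plus the
// error categories from errors.go above.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/theupdateframework/go-tuf/v2/metadata"
	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)

func main() {
	f := &fetcher.DefaultFetcher{}
	f.SetHTTPUserAgent("example-agent/0.1") // optional; reused across requests

	data, err := f.DownloadFile("https://example.com/metadata/1.root.json", 512000, 15*time.Second)
	if err != nil {
		// The custom Is methods make specific failures (HTTP status,
		// length mismatch) match the broader ErrDownload class.
		if errors.Is(err, &metadata.ErrDownload{}) {
			fmt.Println("download-class error:", err)
			return
		}
		panic(err)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}
```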
+ if d.httpUserAgent != "" { + req.Header.Set("User-Agent", d.httpUserAgent) + } + // Execute the request. + res, err := client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + // Handle HTTP status codes. + if res.StatusCode != http.StatusOK { + return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath} + } + var length int64 + // Get content length from header (might not be accurate, -1 or not set). + if header := res.Header.Get("Content-Length"); header != "" { + length, err = strconv.ParseInt(header, 10, 0) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + } + // Although the size has been checked above, use a LimitReader in case + // the reported size is inaccurate, or size is -1 which indicates an + // unknown length. We read maxLength + 1 in order to check if the read data + // surpased our set limit. + data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1)) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + length = int64(len(data)) + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + + return data, nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go new file mode 100644 index 00000000..57e38612 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +const ( + KeyTypeEd25519 = "ed25519" + KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa-sha2-nistp256" + KeyTypeECDSA_SHA2_P256 = "ecdsa" + KeyTypeRSASSA_PSS_SHA256 = "rsa" + KeySchemeEd25519 = "ed25519" + KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" + KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384" + KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" +) + +// ToPublicKey generate crypto.PublicKey from metadata type Key +func (k *Key) ToPublicKey() (crypto.PublicKey, error) { + switch k.Type { + case KeyTypeRSASSA_PSS_SHA256: + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + rsaKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid rsa public key") + } + // done for verification - ref. 
https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return nil, err + } + return rsaKey, nil + case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" too as python-tuf/sslib keys are using it for keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256 + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + ecdsaKey, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid ecdsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return nil, err + } + return ecdsaKey, nil + case KeyTypeEd25519: + publicKey, err := hex.DecodeString(k.Value.PublicKey) + if err != nil { + return nil, err + } + ed25519Key := ed25519.PublicKey(publicKey) + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil { + return nil, err + } + return ed25519Key, nil + } + return nil, fmt.Errorf("unsupported public key type") +} + +// KeyFromPublicKey generate metadata type Key from crypto.PublicKey +func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) { + key := &Key{} + switch k := k.(type) { + case *rsa.PublicKey: + key.Type = KeyTypeRSASSA_PSS_SHA256 + key.Scheme = KeySchemeRSASSA_PSS_SHA256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case *ecdsa.PublicKey: + key.Type = KeyTypeECDSA_SHA2_P256 + key.Scheme = KeySchemeECDSA_SHA2_P256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case ed25519.PublicKey: + key.Type = KeyTypeEd25519 + key.Scheme = KeySchemeEd25519 + key.Value.PublicKey = hex.EncodeToString(k) + default: + return nil, fmt.Errorf("unsupported public key type") + } + return key, nil +} + +// ID returns the keyID value for the given Key +func (k *Key) ID() string { + // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key + if k.id == "" { + data, err := cjson.EncodeCanonical(k) + if err != nil { + panic(fmt.Errorf("error creating key ID: %w", err)) + } + digest := sha256.Sum256(data) + k.id = hex.EncodeToString(digest[:]) + } + return k.id +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go new file mode 100644 index 00000000..c7cab383 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +var log Logger = DiscardLogger{} + +// Logger partially implements the go-log/logr's interface: +// https://github.com/go-logr/logr/blob/master/logr.go +type Logger interface { + // Info logs a non-error message with key/value pairs + Info(msg string, kv ...any) + // Error logs an error with a given message and key/value pairs. + Error(err error, msg string, kv ...any) +} + +type DiscardLogger struct{} + +func (d DiscardLogger) Info(msg string, kv ...any) { +} + +func (d DiscardLogger) Error(err error, msg string, kv ...any) { +} + +func SetLogger(logger Logger) { + log = logger +} + +func GetLogger() Logger { + return log +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go new file mode 100644 index 00000000..bd3f1e44 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go @@ -0,0 +1,567 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/hex" + "encoding/json" + "errors" +) + +// The following marshal/unmarshal methods override the default behavior for for each TUF type +// in order to support unrecognized fields + +func (signed RootType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["consistent_snapshot"] = signed.ConsistentSnapshot + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["keys"] = signed.Keys + dict["roles"] = signed.Roles + return json.Marshal(dict) +} + +func (signed *RootType) UnmarshalJSON(data []byte) error { + type Alias RootType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = RootType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "consistent_snapshot") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "keys") + delete(dict, "roles") + signed.UnrecognizedFields = dict + return nil +} + +func (signed SnapshotType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *SnapshotType) UnmarshalJSON(data []byte) error { + type Alias SnapshotType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + 
return err + } + *signed = SnapshotType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TimestampType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *TimestampType) UnmarshalJSON(data []byte) error { + type Alias TimestampType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TimestampType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetsType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["targets"] = signed.Targets + if signed.Delegations != nil { + dict["delegations"] = signed.Delegations + } + return json.Marshal(dict) +} + +func (signed *TargetsType) UnmarshalJSON(data []byte) error { + type Alias TargetsType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetsType(s) + + // populate the path field for each target + for name, targetFile := range signed.Targets { + targetFile.Path = name + } + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "targets") + delete(dict, "delegations") + signed.UnrecognizedFields = dict + return nil +} + +func (signed MetaFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + // length and hashes are optional + if signed.Length != 0 { + dict["length"] = signed.Length + } + if len(signed.Hashes) != 0 { + dict["hashes"] = signed.Hashes + } + dict["version"] = signed.Version + return json.Marshal(dict) +} + +func (signed *MetaFiles) UnmarshalJSON(data []byte) error { + type Alias MetaFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = MetaFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "version") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["length"] = signed.Length + dict["hashes"] = signed.Hashes + if signed.Custom != nil { + dict["custom"] = signed.Custom + } + return json.Marshal(dict) +} + +func (signed *TargetFiles) UnmarshalJSON(data []byte) error { 
+ type Alias TargetFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "custom") + signed.UnrecognizedFields = dict + return nil +} + +func (key Key) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(key.UnrecognizedFields) != 0 { + copyMapValues(key.UnrecognizedFields, dict) + } + dict["keytype"] = key.Type + dict["scheme"] = key.Scheme + dict["keyval"] = key.Value + return json.Marshal(dict) +} + +func (key *Key) UnmarshalJSON(data []byte) error { + type Alias Key + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + // nolint + *key = Key(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keytype") + delete(dict, "scheme") + delete(dict, "keyval") + key.UnrecognizedFields = dict + return nil +} + +func (meta Metadata[T]) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(meta.UnrecognizedFields) != 0 { + copyMapValues(meta.UnrecognizedFields, dict) + } + dict["signed"] = meta.Signed + dict["signatures"] = meta.Signatures + return json.Marshal(dict) +} + +func (meta *Metadata[T]) UnmarshalJSON(data []byte) error { + tmp := any(new(T)) + var m map[string]any + if err := json.Unmarshal(data, &m); err != nil { + return err + } + switch tmp.(type) { + case *RootType: + dict := struct { + Signed RootType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *SnapshotType: + dict := struct { + Signed SnapshotType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TimestampType: + dict := struct { + Signed TimestampType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TargetsType: + dict := struct { + Signed TargetsType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + default: + return &ErrValue{Msg: "unrecognized metadata type"} + } + delete(m, "signed") + delete(m, "signatures") + meta.UnrecognizedFields = m + return nil +} + +func (s Signature) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(s.UnrecognizedFields) != 0 { + copyMapValues(s.UnrecognizedFields, dict) + } + dict["keyid"] = s.KeyID + dict["sig"] = s.Signature + return json.Marshal(dict) +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + type Alias Signature + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *s = Signature(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyid") + delete(dict, "sig") + s.UnrecognizedFields = dict + return nil +} + +func (kv KeyVal) MarshalJSON() ([]byte, error) { + dict := 
map[string]any{} + if len(kv.UnrecognizedFields) != 0 { + copyMapValues(kv.UnrecognizedFields, dict) + } + dict["public"] = kv.PublicKey + return json.Marshal(dict) +} + +func (kv *KeyVal) UnmarshalJSON(data []byte) error { + type Alias KeyVal + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *kv = KeyVal(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "public") + kv.UnrecognizedFields = dict + return nil +} + +func (role Role) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + return json.Marshal(dict) +} + +func (role *Role) UnmarshalJSON(data []byte) error { + type Alias Role + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = Role(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + role.UnrecognizedFields = dict + return nil +} + +func (d Delegations) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(d.UnrecognizedFields) != 0 { + copyMapValues(d.UnrecognizedFields, dict) + } + // only one is allowed + dict["keys"] = d.Keys + if d.Roles != nil { + dict["roles"] = d.Roles + } else if d.SuccinctRoles != nil { + dict["succinct_roles"] = d.SuccinctRoles + } + return json.Marshal(dict) +} + +func (d *Delegations) UnmarshalJSON(data []byte) error { + type Alias Delegations + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *d = Delegations(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keys") + delete(dict, "roles") + delete(dict, "succinct_roles") + d.UnrecognizedFields = dict + return nil +} + +func (role DelegatedRole) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["name"] = role.Name + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["terminating"] = role.Terminating + // make sure we have only one of the two (per spec) + if role.Paths != nil && role.PathHashPrefixes != nil { + return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"} + } + if role.Paths != nil { + dict["paths"] = role.Paths + } else if role.PathHashPrefixes != nil { + dict["path_hash_prefixes"] = role.PathHashPrefixes + } + return json.Marshal(dict) +} + +func (role *DelegatedRole) UnmarshalJSON(data []byte) error { + type Alias DelegatedRole + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = DelegatedRole(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "name") + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "terminating") + delete(dict, "paths") + delete(dict, "path_hash_prefixes") + role.UnrecognizedFields = dict + return nil +} + +func (role SuccinctRoles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["bit_length"] = role.BitLength + dict["name_prefix"] = role.NamePrefix + return 
json.Marshal(dict) +} + +func (role *SuccinctRoles) UnmarshalJSON(data []byte) error { + type Alias SuccinctRoles + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = SuccinctRoles(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "bit_length") + delete(dict, "name_prefix") + role.UnrecognizedFields = dict + return nil +} + +func (b *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("tuf: invalid JSON hex bytes") + } + res := make([]byte, hex.DecodedLen(len(data)-2)) + _, err := hex.Decode(res, data[1:len(data)-1]) + if err != nil { + return err + } + *b = res + return nil +} + +func (b HexBytes) MarshalJSON() ([]byte, error) { + res := make([]byte, hex.EncodedLen(len(b))+2) + res[0] = '"' + res[len(res)-1] = '"' + hex.Encode(res[1:], b) + return res, nil +} + +func (b HexBytes) String() string { + return hex.EncodeToString(b) +} + +// copyMapValues copies the values of the src map to dst +func copyMapValues(src, dst map[string]any) { + for k, v := range src { + dst[k] = v + } +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go new file mode 100644 index 00000000..3e0a9e1a --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go @@ -0,0 +1,922 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "time" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/signature" +) + +// Root return new metadata instance of type Root +func Root(expires ...time.Time) *Metadata[RootType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + // populate Roles + roles := map[string]*Role{} + for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} { + roles[r] = &Role{ + KeyIDs: []string{}, + Threshold: 1, + } + } + log.Info("Created metadata", "type", ROOT) + return &Metadata[RootType]{ + Signed: RootType{ + Type: ROOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Keys: map[string]*Key{}, + Roles: roles, + ConsistentSnapshot: true, + }, + Signatures: []Signature{}, + } +} + +// Snapshot return new metadata instance of type Snapshot +func Snapshot(expires ...time.Time) *Metadata[SnapshotType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", SNAPSHOT) + return &Metadata[SnapshotType]{ + Signed: SnapshotType{ + Type: SNAPSHOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "targets.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Timestamp return new metadata instance of type Timestamp +func Timestamp(expires ...time.Time) *Metadata[TimestampType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TIMESTAMP) + return &Metadata[TimestampType]{ + Signed: TimestampType{ + Type: TIMESTAMP, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "snapshot.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Targets return new metadata instance of type Targets +func Targets(expires ...time.Time) *Metadata[TargetsType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TARGETS) + return &Metadata[TargetsType]{ + Signed: TargetsType{ + Type: TARGETS, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Targets: map[string]*TargetFiles{}, + }, + Signatures: []Signature{}, + } +} + +// TargetFile return new metadata instance of type TargetFiles +func TargetFile() *TargetFiles { + return &TargetFiles{ + Length: 0, + Hashes: Hashes{}, + } +} + +// MetaFile return new metadata instance of type MetaFile +func MetaFile(version int64) *MetaFiles { + if version < 1 { + // attempting to set incorrect version + log.Info("Attempting to set incorrect version for MetaFile", "version", version) + version = 1 + } + return &MetaFiles{ + Length: 0, + Hashes: Hashes{}, + Version: version, + } +} + +// FromFile load metadata from file +func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) { + in, err := os.Open(name) + if err != nil { + return nil, err + } + defer 
in.Close() + data, err := io.ReadAll(in) + if err != nil { + return nil, err + } + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from file", "name", name) + return meta, nil +} + +// FromBytes deserializes metadata from bytes +func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) { + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from bytes") + return meta, nil +} + +// ToBytes serializes metadata to bytes +func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) { + log.Info("Writing metadata to bytes") + if pretty { + return json.MarshalIndent(*meta, "", "\t") + } + return json.Marshal(*meta) +} + +// ToFile saves metadata to a file +func (meta *Metadata[T]) ToFile(name string, pretty bool) error { + log.Info("Writing metadata to file", "name", name) + data, err := meta.ToBytes(pretty) + if err != nil { + return err + } + return os.WriteFile(name, data, 0644) +} + +// Sign creates a signature over Signed and appends it to Signatures +func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) { + // encode the Signed part to canonical JSON so signatures are consistent + payload, err := cjson.EncodeCanonical(meta.Signed) + if err != nil { + return nil, err + } + // sign the Signed part + sb, err := signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"} + } + // get the signer's PublicKey + publ, err := signer.PublicKey() + if err != nil { + return nil, err + } + // convert to TUF Key type to get keyID + key, err := KeyFromPublicKey(publ) + if err != nil { + return nil, err + } + // build signature + sig := &Signature{ + KeyID: key.ID(), + Signature: sb, + } + // update the Signatures part + meta.Signatures = append(meta.Signatures, *sig) + // return the new signature + log.Info("Signed metadata with key", "ID", key.ID()) + return sig, nil +} + +// VerifyDelegate verifies that delegatedMetadata is signed with the required +// threshold of keys for the delegated role delegatedRole +func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error { + i := any(meta) + signingKeys := map[string]bool{} + var keys map[string]*Key + var roleKeyIDs []string + var roleThreshold int + + log.Info("Verifying", "role", delegatedRole) + + // collect keys, keyIDs and threshold based on delegator type + switch i := i.(type) { + // Root delegator + case *Metadata[RootType]: + keys = i.Signed.Keys + if role, ok := (*i).Signed.Roles[delegatedRole]; ok { + roleKeyIDs = role.KeyIDs + roleThreshold = role.Threshold + } else { + // the delegated role was not found, no need to proceed + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // Targets delegator + case *Metadata[TargetsType]: + if i.Signed.Delegations == nil { + return &ErrValue{Msg: "no delegations found"} + } + keys = i.Signed.Delegations.Keys + if i.Signed.Delegations.Roles != nil { + found := false + for _, v := range i.Signed.Delegations.Roles { + if v.Name == delegatedRole { + found = true + roleKeyIDs = v.KeyIDs + roleThreshold = v.Threshold + break + } + } + // the delegated role was not found, no need to proceed + if !found { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + } else if i.Signed.Delegations.SuccinctRoles != nil { + roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs + roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold + } + default: + return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"} + } + // if there are no keyIDs for that role it means there's no delegation found + if len(roleKeyIDs) == 0 { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // loop through each role keyID + for _, keyID := range roleKeyIDs { + key, ok := keys[keyID] + if !ok { + return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)} + } + sign := Signature{} + var payload []byte + // convert to a PublicKey type + publicKey, err := key.ToPublicKey() + if err != nil { + return err + } + // use corresponding hash function for key type + hash := crypto.Hash(0) + if key.Type != KeyTypeEd25519 { + switch key.Scheme { + case KeySchemeECDSA_SHA2_P256: + hash = crypto.SHA256 + case KeySchemeECDSA_SHA2_P384: + hash = crypto.SHA384 + default: + hash = crypto.SHA256 + } + } + // load a verifier based on that key + verifier, err := signature.LoadVerifier(publicKey, hash) + if err != nil { + return err + } + // collect the signature for that key and build the payload we'll verify + // based on the Signed part of the delegated metadata + switch d := delegatedMetadata.(type) { + case *Metadata[RootType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[SnapshotType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[TimestampType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[TargetsType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + default: + return &ErrType{Msg: "unknown delegated metadata type"} + } + // verify if the signature for that payload corresponds to the given key + if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil { + // failed to verify the metadata with that key ID + log.Info("Failed to verify with key", "role", delegatedRole, "ID", keyID) + } else { + // save the verified keyID only if verification passed + signingKeys[keyID] = true + log.Info("Verified with key", "role", delegatedRole, "ID", keyID) + } + } + // check if the number of valid signatures is enough + if len(signingKeys) < roleThreshold { + log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold) + return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)} + } + log.Info("Verified successfully", "role", delegatedRole) + return nil +} + +// IsExpired returns true if metadata is expired. 
+// It checks if referenceTime is after Signed.Expires +func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// IsExpired returns true if metadata is expired. +// It checks if referenceTime is after Signed.Expires +func (signed *TimestampType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// IsExpired returns true if metadata is expired. +// It checks if referenceTime is after Signed.Expires +func (signed *TargetsType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding +// length and hashes +func (f *MetaFiles) VerifyLengthHashes(data []byte) error { + // hashes and length are optional for MetaFiles + if len(f.Hashes) > 0 { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + } + if f.Length != 0 { + err := verifyLength(data, f.Length) + if err != nil { + return err + } + } + return nil +} + +// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding +// length and hashes +func (f *TargetFiles) VerifyLengthHashes(data []byte) error { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + err = verifyLength(data, f.Length) + if err != nil { + return err + } + return nil +} + +// Equal checks whether the source target file matches another +func (source *TargetFiles) Equal(expected TargetFiles) bool { + return source.Length == expected.Length && source.Hashes.Equal(expected.Hashes) +} + +// FromFile generates TargetFiles from a file +func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from file", "path", localPath) + // open file + in, err := os.Open(localPath) + if err != nil { + return nil, err + } + defer in.Close() + // read file + data, err := io.ReadAll(in) + if err != nil { + return nil, err + } + return t.FromBytes(localPath, data, hashes...) +} + +// FromBytes generates TargetFiles from bytes +func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from bytes", "path", localPath) + var hasher hash.Hash + targetFile := &TargetFiles{ + Hashes: map[string]HexBytes{}, + } + // use default hash algorithm if not set + if len(hashes) == 0 { + hashes = []string{"sha256"} + } + // calculate length + length, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return nil, err + } + targetFile.Length = length + for _, v := range hashes { + switch v { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)} + } + _, err := hasher.Write(data) + if err != nil { + return nil, err + } + targetFile.Hashes[v] = hasher.Sum(nil) + } + targetFile.Path = localPath + return targetFile, nil +} + +// ClearSignatures clears Signatures +func (meta *Metadata[T]) ClearSignatures() { + log.Info("Cleared signatures") + meta.Signatures = []Signature{} +} + +// IsDelegatedPath determines whether the given "targetFilepath" is in one of +// the paths that "DelegatedRole" is trusted to provide +func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) { + if len(role.Paths) > 0 { + // standard delegations + for _, pathPattern := range role.Paths { + // A delegated role path may be an explicit path or glob + // pattern (Unix shell-style wildcards). + if isTargetInPathPattern(targetFilepath, pathPattern) { + return true, nil + } + } + } else if len(role.PathHashPrefixes) > 0 { + // hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target. + targetFilepathHash := sha256.Sum256([]byte(targetFilepath)) + for _, pathHashPrefix := range role.PathHashPrefixes { + if strings.HasPrefix(hex.EncodeToString(targetFilepathHash[:]), pathHashPrefix) { + return true, nil + } + } + } + return false, nil +} + +// Determine whether "targetpath" matches the "pathpattern". +func isTargetInPathPattern(targetpath string, pathpattern string) bool { + // We need to make sure that targetpath and pathpattern are pointing to + // the same directory as fnmatch doesn't treat "/" as a special symbol. + targetParts := strings.Split(targetpath, "/") + patternParts := strings.Split(pathpattern, "/") + if len(targetParts) != len(patternParts) { + return false + } + + // Every part in the pathpattern could include a glob pattern, that's why + // each of the target and pathpattern parts should match. + for i := 0; i < len(targetParts); i++ { + if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok { + return false + } + } + + return true +} + +// GetRolesForTarget returns the names and terminating status of all +// delegated roles that are responsible for targetFilepath +// Note the result should be an ordered list, ref. 
https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j +func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult { + var res []RoleResult + // Standard delegations + if role.Roles != nil { + for _, r := range role.Roles { + ok, err := r.IsDelegatedPath(targetFilepath) + if err == nil && ok { + res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating}) + } + } + } else if role.SuccinctRoles != nil { + // SuccinctRoles delegations + res = role.SuccinctRoles.GetRolesForTarget(targetFilepath) + } + // We preserve the same order as the actual roles list + return res +} + +// GetRolesForTarget calculate the name of the delegated role responsible for "targetFilepath". +// The target at path "targetFilepath" is assigned to a bin by casting +// the left-most "BitLength" of bits of the file path hash digest to +// int, using it as bin index between 0 and “2**BitLength-1”. +func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult { + // calculate the suffixLen value based on the total number of bins in + // hex. If bit_length = 10 then numberOfBins = 1024 or bin names will + // have a suffix between "000" and "3ff" in hex and suffixLen will be 3 + // meaning the third bin will have a suffix of "003" + numberOfBins := math.Pow(2, float64(role.BitLength)) + // suffixLen is calculated based on "numberOfBins - 1" as the name + // of the last bin contains the number "numberOfBins -1" as a suffix. + suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16)) + + targetFilepathHash := sha256.Sum256([]byte(targetFilepath)) + // we can't ever need more than 4 bytes (32 bits) + hashBytes := targetFilepathHash[:4] + + // right shift hash bytes, so that we only have the leftmost + // bit_length bits that we care about + shiftValue := 32 - role.BitLength + binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue + // add zero padding if necessary and cast to hex the suffix + suffix := fmt.Sprintf("%0*x", suffixLen, binNumber) + // we consider all succinct_roles as terminating. + // for more information, read TAP 15. 
+ return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}} +} + +// GetRoles returns the names of all different delegated roles +func (role *SuccinctRoles) GetRoles() []string { + res := []string{} + suffixLen, numberOfBins := role.GetSuffixLen() + + for binNumber := 0; binNumber < numberOfBins; binNumber++ { + suffix := fmt.Sprintf("%0*x", suffixLen, binNumber) + res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix)) + } + return res +} + +func (role *SuccinctRoles) GetSuffixLen() (int, int) { + numberOfBins := int(math.Pow(2, float64(role.BitLength))) + return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins +} + +// IsDelegatedRole returns whether the given roleName is in one of +// the delegated roles that “SuccinctRoles“ represents +func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool { + suffixLen, numberOfBins := role.GetSuffixLen() + + expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix) + + // check if the roleName prefix is what we would expect + if !strings.HasPrefix(roleName, expectedPrefix) { + return false + } + + // check if the roleName suffix length is what we would expect + suffix := roleName[len(expectedPrefix):] + if len(suffix) != suffixLen { + return false + } + + // make sure suffix is hex value and get bin number + value, err := strconv.ParseInt(suffix, 16, 64) + if err != nil { + return false + } + + // check if the bin we calculated is indeed within the range of what we support + return (value >= 0) && (value < int64(numberOfBins)) +} + +// AddKey adds new signing key for delegated role "role" +// keyID: Identifier of the key to be added for “role“. +// key: Signing key to be added for “role“. +// role: Name of the role, for which “key“ is added. +func (signed *RootType) AddKey(key *Key, role string) error { + // verify role is present + if _, ok := signed.Roles[role]; !ok { + return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)} + } + // add keyID to role + if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) { + signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID()) + } + // update Keys + signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil +} + +// RevokeKey revoke key from “role“ and updates the Keys store. +// keyID: Identifier of the key to be removed for “role“. +// role: Name of the role, for which a signing key is removed. +func (signed *RootType) RevokeKey(keyID, role string) error { + // verify role is present + if _, ok := signed.Roles[role]; !ok { + return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)} + } + // verify keyID is present for given role + if !slices.Contains(signed.Roles[role].KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)} + } + // remove keyID from role + filteredKeyIDs := []string{} + for _, k := range signed.Roles[role].KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice + signed.Roles[role].KeyIDs = filteredKeyIDs + // check if keyID is used by other roles too + for _, r := range signed.Roles { + if slices.Contains(r.KeyIDs, keyID) { + return nil + } + } + // delete the keyID from Keys if it's not used anywhere else + delete(signed.Keys, keyID) + return nil +} + +// AddKey adds new signing key for delegated role "role" +// key: Signing key to be added for “role“. 
+// role: Name of the role, for which “key“ is added. +// If SuccinctRoles is used then the "role" argument can be ignored. +func (signed *TargetsType) AddKey(key *Key, role string) error { + // check if Delegations are even present + if signed.Delegations == nil { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + // standard delegated roles + if signed.Delegations.Roles != nil { + // loop through all delegated roles + isDelegatedRole := false + for i, d := range signed.Delegations.Roles { + // if role is found + if d.Name == role { + isDelegatedRole = true + // add key if keyID is not already part of keyIDs for that role + if !slices.Contains(d.KeyIDs, key.ID()) { + signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID()) + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil + } + log.Info("Delegated role already has keyID", "role", role, "ID", key.ID()) + } + } + if !isDelegatedRole { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + } else if signed.Delegations.SuccinctRoles != nil { + // add key if keyID is not already part of keyIDs for the SuccinctRoles role + if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) { + signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil + } + log.Info("SuccinctRoles role already has keyID", "ID", key.ID()) + + } + signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value? + return nil +} + +// RevokeKey revokes key from delegated role "role" and updates the delegations key store +// keyID: Identifier of the key to be removed for “role“. +// role: Name of the role, for which a signing key is removed. 
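A quick usage sketch of the key-management API vendored above (not part of the diff): it exercises RootType.AddKey and RootType.RevokeKey; the same pattern applies to TargetsType for delegated roles. `metadata.KeyFromPublicKey` is referenced by Sign() above but defined elsewhere in this package, so its use here is an assumption about that helper.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	root := metadata.Root()

	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Convert the crypto.PublicKey into a TUF Key (assumed helper, see above).
	key, err := metadata.KeyFromPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}

	// Trust the key for the timestamp role.
	if err := root.Signed.AddKey(key, metadata.TIMESTAMP); err != nil {
		log.Fatal(err)
	}
	// Revoking it also drops it from Signed.Keys, because no other
	// role still references this key ID.
	if err := root.Signed.RevokeKey(key.ID(), metadata.TIMESTAMP); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(root.Signed.Keys)) // 0
}
```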
+func (signed *TargetsType) RevokeKey(keyID string, role string) error { + // check if Delegations are even present + if signed.Delegations == nil { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + // standard delegated roles + if signed.Delegations.Roles != nil { + // loop through all delegated roles + for i, d := range signed.Delegations.Roles { + // if role is found + if d.Name == role { + // check if keyID is present in keyIDs for that role + if !slices.Contains(d.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)} + } + // remove keyID from role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.Roles[i].KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for that role + signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs + // check if keyID is used by other roles too + for _, r := range signed.Delegations.Roles { + if slices.Contains(r.KeyIDs, keyID) { + return nil + } + } + // delete the keyID from Keys if it's not used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + } + // we haven't found the delegated role + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } else if signed.Delegations.SuccinctRoles != nil { + // check if keyID is used by SuccinctRoles role + if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)} + } + // remove keyID from the SuccinctRoles role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.SuccinctRoles.KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for SuccinctRoles role + signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs + + // delete the keyID from Keys since it can not be used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} +} + +// Equal checks whether one hash set equals another +func (source Hashes) Equal(expected Hashes) bool { + hashChecked := false + for typ, hash := range expected { + if h, ok := source[typ]; ok { + // hash type match found + hashChecked = true + if !hmac.Equal(h, hash) { + // hash values don't match + return false + } + } + } + return hashChecked +} + +// verifyLength verifies if the passed data has the corresponding length +func verifyLength(data []byte, length int64) error { + len, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return err + } + if length != len { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, len)} + } + return nil +} + +// verifyHashes verifies if the hash of the passed data corresponds to it +func verifyHashes(data []byte, hashes Hashes) error { + var hasher hash.Hash + for k, v := range hashes { + switch k { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)} + } + hasher.Write(data) + if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)} + } + } + return nil +} + +// fromBytes return a *Metadata[T] object from bytes and verifies +// 
that the data corresponds to the caller struct type +func fromBytes[T Roles](data []byte) (*Metadata[T], error) { + meta := &Metadata[T]{} + // verify that the type we used to create the object is the same as the type of the metadata file + if err := checkType[T](data); err != nil { + return nil, err + } + // if all is okay, unmarshal meta to the desired Metadata[T] type + if err := json.Unmarshal(data, meta); err != nil { + return nil, err + } + // Make sure signature key IDs are unique + if err := checkUniqueSignatures(*meta); err != nil { + return nil, err + } + return meta, nil +} + +// checkUniqueSignatures verifies if the signature key IDs are unique for that metadata +func checkUniqueSignatures[T Roles](meta Metadata[T]) error { + signatures := []string{} + for _, sig := range meta.Signatures { + if slices.Contains(signatures, sig.KeyID) { + return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)} + } + signatures = append(signatures, sig.KeyID) + } + return nil +} + +// checkType verifies if the generic type used to create the object is the same as the type of the metadata file in bytes +func checkType[T Roles](data []byte) error { + var m map[string]any + i := any(new(T)) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + signedType := m["signed"].(map[string]any)["_type"].(string) + switch i.(type) { + case *RootType: + if ROOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)} + } + case *SnapshotType: + if SNAPSHOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)} + } + case *TimestampType: + if TIMESTAMP != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)} + } + case *TargetsType: + if TARGETS != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)} + } + default: + return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)} + } + // all okay + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go new file mode 100644 index 00000000..0726f31f --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go @@ -0,0 +1,357 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package trustedmetadata + +import ( + "fmt" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// TrustedMetadata struct for storing trusted metadata +type TrustedMetadata struct { + Root *metadata.Metadata[metadata.RootType] + Snapshot *metadata.Metadata[metadata.SnapshotType] + Timestamp *metadata.Metadata[metadata.TimestampType] + Targets map[string]*metadata.Metadata[metadata.TargetsType] + RefTime time.Time +} + +// New creates a new TrustedMetadata instance which ensures that the +// collection of metadata in it is valid and trusted through the whole +// client update workflow. It provides easy ways to update the metadata +// with the caller making decisions on what is updated +func New(rootData []byte) (*TrustedMetadata, error) { + res := &TrustedMetadata{ + Targets: map[string]*metadata.Metadata[metadata.TargetsType]{}, + RefTime: time.Now().UTC(), + } + // load and validate the local root metadata + // valid initial trusted root metadata is required + err := res.loadTrustedRoot(rootData) + if err != nil { + return nil, err + } + return res, nil +} + +// UpdateRoot verifies and loads “rootData“ as new root metadata. +// Note that an expired intermediate root is considered valid: expiry is +// only checked for the final root in UpdateTimestamp() +func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) { + log := metadata.GetLogger() + + if trusted.Timestamp != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"} + } + log.Info("Updating root") + // generate root metadata + newRoot, err := metadata.Root().FromBytes(rootData) + if err != nil { + return nil, err + } + // check metadata type matches root + if newRoot.Signed.Type != metadata.ROOT { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)} + } + // verify that new root is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return nil, err + } + // verify version + if newRoot.Signed.Version != trusted.Root.Signed.Version+1 { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)} + } + // verify that new root is signed by itself + err = newRoot.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return nil, err + } + // save root if verified + trusted.Root = newRoot + log.Info("Updated root", "version", trusted.Root.Signed.Version) + return trusted.Root, nil +} + +// UpdateTimestamp verifies and loads “timestampData“ as new timestamp metadata. +// Note that an intermediate timestamp is allowed to be expired. "TrustedMetadata" +// will error in this case but the intermediate timestamp will be loaded. +// This way a newer timestamp can still be loaded (and the intermediate +// timestamp will be used for rollback protection). Expired timestamp will +// prevent loading snapshot metadata. +func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) { + log := metadata.GetLogger() + + if trusted.Snapshot != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"} + } + // client workflow 5.3.10: Make sure final root is not expired. 
+ if trusted.Root.Signed.IsExpired(trusted.RefTime) { + // no need to check for 5.3.11 (fast forward attack recovery): + // timestamp/snapshot can not yet be loaded at this point + return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"} + } + log.Info("Updating timestamp") + newTimestamp, err := metadata.Timestamp().FromBytes(timestampData) + if err != nil { + return nil, err + } + // check metadata type matches timestamp + if newTimestamp.Signed.Type != metadata.TIMESTAMP { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)} + } + // verify that new timestamp is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp) + if err != nil { + return nil, err + } + // if an existing trusted timestamp is updated, + // check for a rollback attack + if trusted.Timestamp != nil { + // prevent rolling back timestamp version + if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)} + } + // keep using old timestamp if versions are equal + if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version { + log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version) + return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)} + } + // prevent rolling back snapshot version + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + if newSnapshotMeta.Version < snapshotMeta.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)} + } + } + // expiry not checked to allow old timestamp to be used for rollback + // protection of new timestamp: expiry is checked in UpdateSnapshot() + // save timestamp if verified + trusted.Timestamp = newTimestamp + log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version) + + // timestamp is loaded: error if it is not a valid _final_ timestamp + err = trusted.checkFinalTimestamp() + if err != nil { + return nil, err + } + // all okay + return trusted.Timestamp, nil +} + +// checkFinalTimestamp verifies if trusted timestamp is not expired +func (trusted *TrustedMetadata) checkFinalTimestamp() error { + if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) { + return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"} + } + return nil +} + +// UpdateSnapshot verifies and loads "snapshotData" as new snapshot metadata. +// Note that an intermediate snapshot is allowed to be expired and version +// is allowed to not match timestamp meta version: TrustedMetadata +// will error in case of expired metadata or bad versions, but the +// intermediate snapshot will be loaded. This way a newer snapshot can still +// be loaded (and the intermediate snapshot will be used for rollback protection). +// Expired snapshot or snapshot that does not match timestamp meta version will +// prevent loading targets. 
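Before the UpdateSnapshot implementation below, a usage sketch of the ordering that the ErrRuntime guards enforce (not part of the diff; file names are placeholders for locally cached metadata): timestamp must be loaded before snapshot, and calling UpdateSnapshot first would fail.

```go
package main

import (
	"log"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)

func main() {
	// Bootstrap trust from a locally stored root.json.
	rootBytes, err := os.ReadFile("root.json")
	if err != nil {
		log.Fatal(err)
	}
	trusted, err := trustedmetadata.New(rootBytes)
	if err != nil {
		log.Fatal(err)
	}

	tsBytes, err := os.ReadFile("timestamp.json")
	if err != nil {
		log.Fatal(err)
	}
	// Calling UpdateSnapshot before this point returns ErrRuntime.
	if _, err := trusted.UpdateTimestamp(tsBytes); err != nil {
		log.Fatal(err)
	}

	snapBytes, err := os.ReadFile("snapshot.json")
	if err != nil {
		log.Fatal(err)
	}
	// false: untrusted bytes, so they are checked against timestamp meta.
	if _, err := trusted.UpdateSnapshot(snapBytes, false); err != nil {
		log.Fatal(err)
	}
	log.Println("timestamp and snapshot verified")
}
```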
+func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) { + log := metadata.GetLogger() + + if trusted.Timestamp == nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"} + } + if trusted.Targets[metadata.TARGETS] != nil { + return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"} + } + log.Info("Updating snapshot") + + // snapshot cannot be loaded if final timestamp is expired + err := trusted.checkFinalTimestamp() + if err != nil { + return nil, err + } + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + // verify non-trusted data against the hashes in timestamp, if any. + // trusted snapshot data has already been verified once. + if !isTrusted { + err = snapshotMeta.VerifyLengthHashes(snapshotData) + if err != nil { + return nil, err + } + } + newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData) + if err != nil { + return nil, err + } + // check metadata type matches snapshot + if newSnapshot.Signed.Type != metadata.SNAPSHOT { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)} + } + // verify that new snapshot is signed by trusted root + err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot) + if err != nil { + return nil, err + } + + // version not checked against meta version to allow old snapshot to be + // used in rollback protection: it is checked when targets is updated + + // if an existing trusted snapshot is updated, check for rollback attack + if trusted.Snapshot != nil { + for name, info := range trusted.Snapshot.Signed.Meta { + newFileInfo, ok := newSnapshot.Signed.Meta[name] + // prevent removal of any metadata in meta + if !ok { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)} + } + // prevent rollback of any metadata versions + if newFileInfo.Version < info.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d or higher, got %d", name, info.Version, newFileInfo.Version)} + } + } + } + + // expiry not checked to allow old snapshot to be used for rollback + // protection of new snapshot: it is checked when targets is updated + trusted.Snapshot = newSnapshot + log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version) + + // snapshot is loaded, but we error if it's not a valid _final_ snapshot + err = trusted.checkFinalSnapshot() + if err != nil { + return nil, err + } + // all okay + return trusted.Snapshot, nil +} + +// checkFinalSnapshot verifies that the snapshot is not expired and that its version matches the timestamp meta version +func (trusted *TrustedMetadata) checkFinalSnapshot() error { + if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) { + return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"} + } + snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + if trusted.Snapshot.Signed.Version != snapshotMeta.Version { + return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)} + } + return nil +} + +// UpdateTargets verifies and loads "targetsData" as new top-level targets metadata. 
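Before the UpdateTargets implementation below, a sketch of the final step in the trusted-metadata flow (not part of the diff; it continues the placeholder file layout used above). UpdateTargets is shorthand for UpdateDelegatedTargets with the top-level targets role and root as the delegator.

```go
package main

import (
	"log"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)

// loadTopLevelTargets verifies targets.json against the already-loaded
// timestamp and snapshot; the file name is a placeholder.
func loadTopLevelTargets(trusted *trustedmetadata.TrustedMetadata) error {
	data, err := os.ReadFile("targets.json")
	if err != nil {
		return err
	}
	// Equivalent to UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT):
	// root must verify the targets role with its threshold of keys.
	_, err = trusted.UpdateTargets(data)
	return err
}

func main() {
	rootBytes, err := os.ReadFile("root.json")
	if err != nil {
		log.Fatal(err)
	}
	trusted, err := trustedmetadata.New(rootBytes)
	if err != nil {
		log.Fatal(err)
	}
	// UpdateTimestamp/UpdateSnapshot must succeed first (see the previous
	// sketch); otherwise UpdateTargets fails with ErrRuntime.
	if err := loadTopLevelTargets(trusted); err != nil {
		log.Fatal(err)
	}
}
```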
+func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) { + return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT) +} + +// UpdateDelegatedTargets verifies and loads “targetsData“ as new metadata for target “roleName“ +func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) { + log := metadata.GetLogger() + + var ok bool + if trusted.Snapshot == nil { + return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"} + } + // targets cannot be loaded if final snapshot is expired or its version + // does not match meta version in timestamp + err := trusted.checkFinalSnapshot() + if err != nil { + return nil, err + } + // check if delegator metadata is present + if delegatorName == metadata.ROOT { + if trusted.Root != nil { + ok = true + } else { + ok = false + } + } else { + _, ok = trusted.Targets[delegatorName] + } + if !ok { + return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"} + } + log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName) + // Verify against the hashes in snapshot, if any + meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)] + if !ok { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)} + } + err = meta.VerifyLengthHashes(targetsData) + if err != nil { + return nil, err + } + newDelegate, err := metadata.Targets().FromBytes(targetsData) + if err != nil { + return nil, err + } + // check metadata type matches targets + if newDelegate.Signed.Type != metadata.TARGETS { + return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)} + } + // get delegator metadata and verify the new delegatee + if delegatorName == metadata.ROOT { + err = trusted.Root.VerifyDelegate(roleName, newDelegate) + if err != nil { + return nil, err + } + } else { + err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate) + if err != nil { + return nil, err + } + } + // check versions + if newDelegate.Signed.Version != meta.Version { + return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)} + } + // check expiration + if newDelegate.Signed.IsExpired(trusted.RefTime) { + return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)} + } + trusted.Targets[roleName] = newDelegate + log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version) + return trusted.Targets[roleName], nil +} + +// loadTrustedRoot verifies and loads "data" as trusted root metadata. +// Note that an expired initial root is considered valid: expiry is +// only checked for the final root in “UpdateTimestamp()“. 
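Since loadTrustedRoot (below) accepts an expired initial root, a client typically chains UpdateRoot calls to walk forward to the current root before loading timestamp metadata. A sketch of that rotation loop, assuming a hypothetical local `N.root.json` naming layout (not part of the diff):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
)

func main() {
	rootBytes, err := os.ReadFile("1.root.json") // hypothetical layout
	if err != nil {
		log.Fatal(err)
	}
	trusted, err := trustedmetadata.New(rootBytes)
	if err != nil {
		log.Fatal(err)
	}
	// Walk N.root.json files until one is missing; each UpdateRoot checks
	// signatures by both the old and the new root, and that the version
	// increases by exactly one.
	for version := trusted.Root.Signed.Version + 1; ; version++ {
		data, err := os.ReadFile(fmt.Sprintf("%d.root.json", version))
		if err != nil {
			break // no newer root cached
		}
		if _, err := trusted.UpdateRoot(data); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("trusted root version:", trusted.Root.Signed.Version)
}
```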
+func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error { + log := metadata.GetLogger() + + // generate root metadata + newRoot, err := metadata.Root().FromBytes(rootData) + if err != nil { + return err + } + // check metadata type matches root + if newRoot.Signed.Type != metadata.ROOT { + return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)} + } + // verify root by itself + err = newRoot.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return err + } + // save root if verified + trusted.Root = newRoot + log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version) + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go new file mode 100644 index 00000000..0bd86ee8 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go @@ -0,0 +1,179 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "time" +) + +// Generic type constraint +type Roles interface { + RootType | SnapshotType | TimestampType | TargetsType +} + +// Define version of the TUF specification +const ( + SPECIFICATION_VERSION = "1.0.31" +) + +// Define top level role names +const ( + ROOT = "root" + SNAPSHOT = "snapshot" + TARGETS = "targets" + TIMESTAMP = "timestamp" +) + +var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS} + +// Metadata[T Roles] represents a TUF metadata. +// Provides methods to read and write to and +// from file and bytes, also to create, verify and clear metadata signatures. 
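A round-trip sketch of the generic Metadata[T] container defined below (not part of the diff), using only APIs from the vendored metadata package: build targets metadata, register a target file, serialize, and deserialize with the type check that fromBytes performs on the "_type" field.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	// Build targets metadata that expires in 90 days.
	targets := metadata.Targets(time.Now().AddDate(0, 0, 90).UTC())

	// Describe a target file (length plus sha256 hash) and register it.
	tf, err := metadata.TargetFile().FromBytes("greeting.txt", []byte("hello world"), "sha256")
	if err != nil {
		log.Fatal(err)
	}
	targets.Signed.Targets["greeting.txt"] = tf

	// Pretty-print with tab indentation.
	data, err := targets.ToBytes(true)
	if err != nil {
		log.Fatal(err)
	}

	// Round-trip: fromBytes checks that "_type" matches the generic type,
	// so loading these same bytes as root metadata would fail with ErrValue.
	if _, err := metadata.Targets().FromBytes(data); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("targets.json is %d bytes\n", len(data))
}
```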
+type Metadata[T Roles] struct { + Signed T `json:"signed"` + Signatures []Signature `json:"signatures"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Signature represents the Signature part of a TUF metadata +type Signature struct { + KeyID string `json:"keyid"` + Signature HexBytes `json:"sig"` + UnrecognizedFields map[string]any `json:"-"` +} + +// RootType represents the Signed portion of a root metadata +type RootType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + ConsistentSnapshot bool `json:"consistent_snapshot"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Keys map[string]*Key `json:"keys"` + Roles map[string]*Role `json:"roles"` + UnrecognizedFields map[string]any `json:"-"` +} + +// SnapshotType represents the Signed portion of a snapshot metadata +type SnapshotType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Meta map[string]*MetaFiles `json:"meta"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TargetsType represents the Signed portion of a targets metadata +type TargetsType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Targets map[string]*TargetFiles `json:"targets"` + Delegations *Delegations `json:"delegations,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TimestampType represents the Signed portion of a timestamp metadata +type TimestampType struct { + Type string `json:"_type"` + SpecVersion string `json:"spec_version"` + Version int64 `json:"version"` + Expires time.Time `json:"expires"` + Meta map[string]*MetaFiles `json:"meta"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Key represents a key in TUF +type Key struct { + Type string `json:"keytype"` + Scheme string `json:"scheme"` + Value KeyVal `json:"keyval"` + id string `json:"-"` + UnrecognizedFields map[string]any `json:"-"` +} + +type KeyVal struct { + PublicKey string `json:"public"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Role represents one of the top-level roles in TUF +type Role struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + UnrecognizedFields map[string]any `json:"-"` +} + +type HexBytes []byte + +type Hashes map[string]HexBytes + +// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file. +type MetaFiles struct { + Length int64 `json:"length,omitempty"` + Hashes Hashes `json:"hashes,omitempty"` + Version int64 `json:"version"` + UnrecognizedFields map[string]any `json:"-"` +} + +// TargetFiles represents the value portion of TARGETS in TUF (used Targets metadata). Used to store information about a particular target file. 
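A sketch of how the delegation types that follow behave (not part of the diff), covering both delegation flavors implemented earlier in metadata.go: glob-based path delegation via DelegatedRole.Paths and succinct hash-bin delegation via SuccinctRoles.

```go
package main

import (
	"fmt"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	// Glob-style path delegation: each "/"-separated segment is matched
	// with filepath.Match, so pattern and path must have the same depth.
	role := metadata.DelegatedRole{
		Name:  "packages",
		Paths: []string{"pkgs/*/*.tar.gz"},
	}
	ok, _ := role.IsDelegatedPath("pkgs/foo/foo-1.0.tar.gz")
	fmt.Println(ok) // true
	ok, _ = role.IsDelegatedPath("pkgs/foo/bar/foo-1.0.tar.gz")
	fmt.Println(ok) // false: one segment too deep

	// Succinct hash-bin delegation: 2^8 bins named bin-00 .. bin-ff; the
	// suffix is the leading 8 bits of sha256(path), rendered in hex.
	sr := metadata.SuccinctRoles{BitLength: 8, NamePrefix: "bin"}
	fmt.Println(sr.GetRolesForTarget("pkgs/foo-1.0.tar.gz")[0].Name)
}
```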
+type TargetFiles struct { + Length int64 `json:"length"` + Hashes Hashes `json:"hashes"` + Custom *json.RawMessage `json:"custom,omitempty"` + Path string `json:"-"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Delegations is an optional object which represents delegation roles and their corresponding keys +type Delegations struct { + Keys map[string]*Key `json:"keys"` + Roles []DelegatedRole `json:"roles,omitempty"` + SuccinctRoles *SuccinctRoles `json:"succinct_roles,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// DelegatedRole represents a delegated role in TUF +type DelegatedRole struct { + Name string `json:"name"` + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + Terminating bool `json:"terminating"` + PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"` + Paths []string `json:"paths,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// SuccinctRoles represents a delegation graph that covers all targets, +// distributing them uniformly over the delegated roles (i.e. bins) in the graph. +type SuccinctRoles struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + BitLength int `json:"bit_length"` + NamePrefix string `json:"name_prefix"` + UnrecognizedFields map[string]any `json:"-"` +} + +// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath +type RoleResult struct { + Name string + Terminating bool +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go new file mode 100644 index 00000000..1b9d21b8 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go @@ -0,0 +1,714 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package updater + +import ( + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata" +) + +// Client update workflow implementation +// +// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow). +// "Updater" provides an API to query available targets and to download them in a +// secure manner: All downloaded files are verified by signed metadata. +// High-level description of "Updater" functionality: +// - Initializing an "Updater" loads and validates the trusted local root +// metadata: This root metadata is used as the source of trust for all other +// metadata. 
+// - Refresh() can optionally be called to update and load all top-level +// metadata as described in the specification, using both locally cached +// metadata and metadata downloaded from the remote repository. If refresh is +// not done explicitly, it will happen automatically during the first target +// info lookup. +// - Updater can be used to download targets. For each target: +// - GetTargetInfo() is first used to find information about a +// specific target. This will load new targets metadata as needed (from +// local cache or remote repository). +// - FindCachedTarget() can optionally be used to check if a +// target file is already locally cached. +// - DownloadTarget() downloads a target file and ensures it is +// verified correct by the metadata. +type Updater struct { + trusted *trustedmetadata.TrustedMetadata + cfg *config.UpdaterConfig +} + +type roleParentTuple struct { + Role string + Parent string +} + +// New creates a new Updater instance and loads trusted root metadata +func New(config *config.UpdaterConfig) (*Updater, error) { + // make sure the trusted root metadata and remote URL were provided + if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 { + return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided") + } + // create a new trusted metadata instance using the trusted root.json + trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot) + if err != nil { + return nil, err + } + // create an updater instance + updater := &Updater{ + cfg: config, + trusted: trustedMetadataSet, // save trusted metadata set + } + // ensure paths exist, doesn't do anything if caching is disabled + err = updater.cfg.EnsurePathsExist() + if err != nil { + return nil, err + } + // persist the initial root metadata to the local metadata folder + err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot) + if err != nil { + return nil, err + } + // all okay, return the updater instance + return updater, nil +} + +// Refresh loads and possibly refreshes top-level metadata. +// Downloads, verifies, and loads metadata for the top-level roles in the +// specified order (root -> timestamp -> snapshot -> targets) implementing +// all the checks required in the TUF client workflow. +// A Refresh() can be done only once during the lifetime of an Updater. +// If Refresh() has not been explicitly called before the first +// GetTargetInfo() call, it will be done implicitly at that time. +// The metadata for delegated roles is not updated by Refresh(): +// that happens on demand during GetTargetInfo(). However, if the +// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots), +// then all metadata downloaded by the Updater will use the same consistent repository state. +// +// If UnsafeLocalMode is set, no network interaction is performed, only +// the cached files on disk are used. If the cached data is not complete, +// this call will fail. +func (update *Updater) Refresh() error { + if update.cfg.UnsafeLocalMode { + return update.unsafeLocalRefresh() + } + return update.onlineRefresh() +} + +// onlineRefresh implements the TUF client workflow as described for +// the Refresh function. 
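An end-to-end sketch of the Updater workflow described above (not part of the diff). The URLs and file names are placeholders; `config.New` and the LocalMetadataDir/LocalTargetsDir fields are assumptions about the vendored config package, which sits outside this hunk.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/config"
	"github.com/theupdateframework/go-tuf/v2/metadata/updater"
)

func main() {
	rootBytes, err := os.ReadFile("root.json")
	if err != nil {
		log.Fatal(err)
	}
	// config.New (assumed helper) seeds the trusted root and defaults.
	cfg, err := config.New("https://example.com/metadata", rootBytes)
	if err != nil {
		log.Fatal(err)
	}
	cfg.LocalMetadataDir = "./tuf-metadata" // assumed field
	cfg.LocalTargetsDir = "./tuf-targets"   // assumed field
	cfg.RemoteTargetsURL = "https://example.com/targets"

	up, err := updater.New(cfg) // loads and verifies the trusted root
	if err != nil {
		log.Fatal(err)
	}
	if err := up.Refresh(); err != nil { // root -> timestamp -> snapshot -> targets
		log.Fatal(err)
	}

	info, err := up.GetTargetInfo("greeting.txt")
	if err != nil {
		log.Fatal(err)
	}
	path, _, err := up.DownloadTarget(info, "", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified target written to", path)
}
```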
+func (update *Updater) onlineRefresh() error { + err := update.loadRoot() + if err != nil { + return err + } + err = update.loadTimestamp() + if err != nil { + return err + } + err = update.loadSnapshot() + if err != nil { + return err + } + _, err = update.loadTargets(metadata.TARGETS, metadata.ROOT) + if err != nil { + return err + } + return nil +} + +// unsafeLocalRefresh tries to load the persisted metadata already cached +// on disk. Note that this is an unsafe function that deviates from the +// TUF specification sections 5.3 to 5.7 (the update phases). +// The metadata on disk are verified against the provided root though, +// and expiration dates are verified. +func (update *Updater) unsafeLocalRefresh() error { + // Root is already loaded + // load timestamp + var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP) + data, err := update.loadLocalMetadata(p) + if err != nil { + return err + } + _, err = update.trusted.UpdateTimestamp(data) + if err != nil { + return err + } + + // load snapshot + p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT) + data, err = update.loadLocalMetadata(p) + if err != nil { + return err + } + _, err = update.trusted.UpdateSnapshot(data, false) + if err != nil { + return err + } + + // targets + p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS) + data, err = update.loadLocalMetadata(p) + if err != nil { + return err + } + // verify and load the new target metadata + _, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT) + if err != nil { + return err + } + + return nil +} + +// GetTargetInfo returns a metadata.TargetFiles instance with information +// for targetPath. The return value can be used as an argument to +// DownloadTarget() and FindCachedTarget(). +// If Refresh() has not been called before calling +// GetTargetInfo(), the refresh will be done implicitly. +// As a side-effect this method downloads all the additional (delegated +// targets) metadata it needs to return the target information. +func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) { + // do a Refresh() in case there's no trusted targets.json yet + if update.trusted.Targets[metadata.TARGETS] == nil { + err := update.Refresh() + if err != nil { + return nil, err + } + } + return update.preOrderDepthFirstWalk(targetPath) +} + +// DownloadTarget downloads the target file specified by targetFile +func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) { + log := metadata.GetLogger() + + var err error + if filePath == "" { + filePath, err = update.generateTargetFilePath(targetFile) + if err != nil { + return "", nil, err + } + } + if targetBaseURL == "" { + if update.cfg.RemoteTargetsURL == "" { + return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"} + } + targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL) + } else { + targetBaseURL = ensureTrailingSlash(targetBaseURL) + } + + targetFilePath := targetFile.Path + targetRemotePath := targetFilePath + consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot + if consistentSnapshot && update.cfg.PrefixTargetsWithHash { + hashes := "" + // get first hex value of hashes + for _, v := range targetFile.Hashes { + hashes = hex.EncodeToString(v) + break + } + baseName := filepath.Base(targetFilePath) + dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName) + if !ok { + // <hash>.<basename> 
+ targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName) + } else { + // <dir-prefix>/<hash>.<basename> + targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName) + } + } + fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath) + data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, time.Second*15) + if err != nil { + return "", nil, err + } + err = targetFile.VerifyLengthHashes(data) + if err != nil { + return "", nil, err + } + + // do not persist the target file if cache is disabled + if !update.cfg.DisableLocalCache { + err = os.WriteFile(filePath, data, 0644) + if err != nil { + return "", nil, err + } + } + log.Info("Downloaded target", "path", targetFile.Path) + return filePath, data, nil +} + +// FindCachedTarget checks whether a local file is an up-to-date target +func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) { + var err error + targetFilePath := "" + // do not look for cached target file if cache is disabled + if update.cfg.DisableLocalCache { + return "", nil, nil + } + // get its path if not provided + if filePath == "" { + targetFilePath, err = update.generateTargetFilePath(targetFile) + if err != nil { + return "", nil, err + } + } else { + targetFilePath = filePath + } + // get file content + data, err := readFile(targetFilePath) + if err != nil { + // do not want to return err, instead we say that there's no cached target available + return "", nil, nil + } + // verify if the length and hashes of this target file match the expected values + err = targetFile.VerifyLengthHashes(data) + if err != nil { + // do not want to return err, instead we say that there's no cached target available + return "", nil, nil + } + // if all okay, return its path + return targetFilePath, data, nil +} + +// loadTimestamp loads local and remote timestamp metadata +func (update *Updater) loadTimestamp() error { + log := metadata.GetLogger() + // try to read local timestamp + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)) + if err != nil { + // this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp + log.Info("Local timestamp does not exist") + } else { + // local timestamp exists, let's try to verify it and load it to the trusted metadata set + _, err := update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrRepository{}) { + // local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local timestamp is not valid") + } else { + // another error + return err + } + } else { + log.Info("Local timestamp is valid") + } + // whether the local timestamp was valid or not, proceed with downloading from remote + } + // load from remote (whether local load succeeded or not) + data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "") + if err != nil { + return err + } + // try to verify and load the newly downloaded timestamp + _, err = update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrEqualVersionNumber{}) { + // if the new timestamp version is the same as current, discard the + // new timestamp; this is normal and it shouldn't raise any error + return nil + } else { + // another error + return err + } + } + // proceed with persisting the new timestamp + err = update.persistMetadata(metadata.TIMESTAMP, data) + if err != 
+ +// loadTimestamp loads local and remote timestamp metadata +func (update *Updater) loadTimestamp() error { + log := metadata.GetLogger() + // try to read local timestamp + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)) + if err != nil { + // this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp + log.Info("Local timestamp does not exist") + } else { + // local timestamp exists, let's try to verify it and load it to the trusted metadata set + _, err := update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrRepository{}) { + // local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local timestamp is not valid") + } else { + // another error + return err + } + } else { + // local timestamp exists and is valid; nevertheless proceed with downloading from remote + log.Info("Local timestamp is valid") + } + } + // load from remote (whether local load succeeded or not) + data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "") + if err != nil { + return err + } + // try to verify and load the newly downloaded timestamp + _, err = update.trusted.UpdateTimestamp(data) + if err != nil { + if errors.Is(err, &metadata.ErrEqualVersionNumber{}) { + // if the new timestamp version is the same as current, discard the + // new timestamp; this is normal and it shouldn't raise any error + return nil + } else { + // another error + return err + } + } + // proceed with persisting the new timestamp + err = update.persistMetadata(metadata.TIMESTAMP, data) + if err != nil { + return err + } + return nil +} + +// loadSnapshot loads local (and if needed remote) snapshot metadata +func (update *Updater) loadSnapshot() error { + log := metadata.GetLogger() + // try to read local snapshot + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)) + if err != nil { + // this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot + log.Info("Local snapshot does not exist") + } else { + // successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set + _, err = update.trusted.UpdateSnapshot(data, true) + if err != nil { + // this means snapshot verification/loading failed + if errors.Is(err, &metadata.ErrRepository{}) { + // local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local snapshot is not valid") + } else { + // another error + return err + } + } else { + // this means snapshot verification/loading succeeded + log.Info("Local snapshot is valid: not downloading new one") + return nil + } + } + // local snapshot does not exist or is invalid, update from remote + log.Info("Failed to load local snapshot") + if update.trusted.Timestamp == nil { + return fmt.Errorf("trusted timestamp not set") + } + // extract the snapshot meta from the trusted timestamp metadata + snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)] + // extract the length of the snapshot metadata to be downloaded + length := snapshotMeta.Length + if length == 0 { + length = update.cfg.SnapshotMaxLength + } + // extract which snapshot version should be downloaded in case of consistent snapshots + version := "" + if update.trusted.Root.Signed.ConsistentSnapshot { + version = strconv.FormatInt(snapshotMeta.Version, 10) + } + // download snapshot metadata + data, err = update.downloadMetadata(metadata.SNAPSHOT, length, version) + if err != nil { + return err + } + // verify and load the new snapshot + _, err = update.trusted.UpdateSnapshot(data, false) + if err != nil { + return err + } + // persist the new snapshot + err = update.persistMetadata(metadata.SNAPSHOT, data) + if err != nil { + return err + } + return nil +}
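The loaders above classify failures with `errors.Is` against a pointer sentinel such as `&metadata.ErrRepository{}`, which only matches a whole family of errors if the error type implements an `Is` method (or wraps a common sentinel). A self-contained illustration of that pattern, using hypothetical names rather than the real `metadata` types:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrRepository stands in for a category of repository errors (hypothetical).
type ErrRepository struct{ Msg string }

func (e *ErrRepository) Error() string { return "repository error: " + e.Msg }

// Is makes errors.Is(err, &ErrRepository{}) match any *ErrRepository,
// regardless of its fields.
func (e *ErrRepository) Is(target error) bool {
	_, ok := target.(*ErrRepository)
	return ok
}

func main() {
	err := fmt.Errorf("loading: %w", &ErrRepository{Msg: "expired metadata"})
	fmt.Println(errors.Is(err, &ErrRepository{})) // true
}
```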
+ +// loadTargets loads local (and if needed remote) metadata for roleName +func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) { + log := metadata.GetLogger() + // avoid loading "roleName" more than once during "GetTargetInfo" + role, ok := update.trusted.Targets[roleName] + if ok { + return role, nil + } + // try to read local targets + data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName)) + if err != nil { + // this means there's no existing local targets file so we should proceed downloading it without the need to UpdateDelegatedTargets + log.Info("Local role does not exist", "role", roleName) + } else { + // successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set + delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName) + if err != nil { + // this means targets verification/loading failed + if errors.Is(err, &metadata.ErrRepository{}) { + // local targets file is not valid, proceed downloading from remote; note that this error type includes several other subset errors + log.Info("Local role is not valid", "role", roleName) + } else { + // another error + return nil, err + } + } else { + // this means targets verification/loading succeeded + log.Info("Local role is valid: not downloading new one", "role", roleName) + return delegatedTargets, nil + } + } + // local "roleName" does not exist or is invalid, update from remote + log.Info("Failed to load local role", "role", roleName) + if update.trusted.Snapshot == nil { + return nil, fmt.Errorf("trusted snapshot not set") + } + // extract the targets' meta from the trusted snapshot metadata + metaInfo, ok := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)] + if !ok { + return nil, fmt.Errorf("role %s not found in snapshot", roleName) + } + // extract the length of the targets metadata to be downloaded + length := metaInfo.Length + if length == 0 { + length = update.cfg.TargetsMaxLength + } + // extract which targets metadata version should be downloaded in case of consistent snapshots + version := "" + if update.trusted.Root.Signed.ConsistentSnapshot { + version = strconv.FormatInt(metaInfo.Version, 10) + } + // download targets metadata + data, err = update.downloadMetadata(roleName, length, version) + if err != nil { + return nil, err + } + // verify and load the new targets metadata + delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName) + if err != nil { + return nil, err + } + // persist the new targets metadata + err = update.persistMetadata(roleName, data) + if err != nil { + return nil, err + } + return delegatedTargets, nil +} + +// loadRoot loads remote root metadata. It sequentially loads and +// persists on local disk every newer root metadata version +// available on the remote +func (update *Updater) loadRoot() error { + // calculate boundaries + lowerBound := update.trusted.Root.Signed.Version + 1 + upperBound := lowerBound + update.cfg.MaxRootRotations + + // loop until we find the latest available version of root (download -> verify -> load -> persist) + for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ { + data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10)) + if err != nil { + // downloading the root metadata failed for some reason + var tmpErr *metadata.ErrDownloadHTTP + if errors.As(err, &tmpErr) { + if tmpErr.StatusCode != http.StatusNotFound && tmpErr.StatusCode != http.StatusForbidden { + // unexpected HTTP status code + return err + } + // 404/403 means current root is newest available, so we can stop the loop and move forward + break + } + // some other error occurred + return err + } else { + // downloading root metadata succeeded, so let's try to verify and load it + _, err = update.trusted.UpdateRoot(data) + if err != nil { + return err + } + // persist root metadata to disk + err = update.persistMetadata(metadata.ROOT, data) + if err != nil { + return err + } + } + } + return nil +}
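The version walk in `loadRoot` is easiest to see as the sequence of URLs it will request until the first 404/403 response stops it. A standalone sketch with illustrative values:

```go
package main

import "fmt"

func main() {
	const baseURL = "https://example.com/metadata/" // illustrative
	trustedVersion, maxRootRotations := int64(1), int64(32)
	// loadRoot tries trusted+1, trusted+2, ... and stops at the first
	// 404/403, which signals the newest available root has been reached.
	for v := trustedVersion + 1; v < trustedVersion+1+maxRootRotations; v++ {
		fmt.Printf("%s%d.root.json\n", baseURL, v) // e.g. .../2.root.json
	}
}
```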
+ +// preOrderDepthFirstWalk interrogates the tree of target delegations +// in order of appearance (which implicitly orders trustworthiness), +// and returns the matching target found in the most trusted role. +func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) { + log := metadata.GetLogger() + // list of delegations to be interrogated. A (role, parent role) pair + // is needed to load and verify the delegated targets metadata + delegationsToVisit := []roleParentTuple{{ + Role: metadata.TARGETS, + Parent: metadata.ROOT, + }} + visitedRoleNames := map[string]bool{} + // pre-order depth-first traversal of the graph of target delegations + for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 { + // pop the role name from the top of the stack + delegation := delegationsToVisit[len(delegationsToVisit)-1] + delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1] + // skip roles that have already been visited to prevent cycles + _, ok := visitedRoleNames[delegation.Role] + if ok { + log.Info("Skipping visited current role", "role", delegation.Role) + continue + } + // the metadata for delegation.Role must be downloaded/updated before + // its targets, delegations, and child roles can be inspected + targets, err := update.loadTargets(delegation.Role, delegation.Parent) + if err != nil { + return nil, err + } + target, ok := targets.Signed.Targets[targetFilePath] + if ok { + log.Info("Found target in current role", "role", delegation.Role) + return target, nil + } + // after pre-order check, add current role to set of visited roles + visitedRoleNames[delegation.Role] = true + if targets.Signed.Delegations != nil { + var childRolesToVisit []roleParentTuple + // note that this may be a slow operation if there are many + // delegated roles + roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath) + for _, rolesForTarget := range roles { + log.Info("Adding child role", "role", rolesForTarget.Name) + childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role}) + if rolesForTarget.Terminating { + log.Info("Not backtracking to other roles") + delegationsToVisit = []roleParentTuple{} + break + } + } + // push childRolesToVisit in reverse order of appearance + // onto delegationsToVisit. Roles are popped from the end of + // the list + reverseSlice(childRolesToVisit) + delegationsToVisit = append(delegationsToVisit, childRolesToVisit...) + } + } + if len(delegationsToVisit) > 0 { + log.Info("Too many roles left to visit for max allowed delegations", + "roles-left", len(delegationsToVisit), + "allowed-delegations", update.cfg.MaxDelegations) + } + // if this point is reached the target was not found, so return an error + return nil, fmt.Errorf("target %s not found", targetFilePath) +}
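The interaction between `Terminating` delegations and the stack discipline above is subtle: a terminating role discards everything else on the stack, yet its own children are still visited. A self-contained toy (role names and tree are invented) that mirrors the traversal:

```go
package main

import "fmt"

type child struct {
	name        string
	terminating bool
}

func main() {
	// "targets" delegates to a, b (terminating), c, in order of appearance
	tree := map[string][]child{
		"targets": {{"a", false}, {"b", true}, {"c", false}},
		"a":       {{"d", false}},
	}
	stack := []string{"targets"}
	visited := map[string]bool{}
	for len(stack) > 0 {
		role := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if visited[role] {
			continue
		}
		visited[role] = true
		fmt.Println("visiting", role)
		var next []string
		for _, c := range tree[role] {
			next = append(next, c.name)
			if c.terminating {
				stack = nil // a terminating delegation stops backtracking
				break
			}
		}
		// push children in reverse so they pop in order of appearance
		for i := len(next) - 1; i >= 0; i-- {
			stack = append(stack, next[i])
		}
	}
	// prints: targets, a, d, b; role c is never visited because b terminates
}
```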
+ +// persistMetadata writes metadata to disk atomically to avoid data loss +func (update *Updater) persistMetadata(roleName string, data []byte) error { + log := metadata.GetLogger() + // do not persist the metadata if we have disabled local caching + if update.cfg.DisableLocalCache { + return nil + } + // caching enabled, proceed with persisting the metadata locally + fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.QueryEscape(roleName))) + // create a temporary file + file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp") + if err != nil { + return err + } + defer file.Close() + // write the data content to the temporary file + err = os.WriteFile(file.Name(), data, 0644) + if err != nil { + // delete the temporary file if there was an error while writing + errRemove := os.Remove(file.Name()) + if errRemove != nil { + log.Info("Failed to delete temporary file", "name", file.Name()) + } + return err + } + + // can't move/rename an open file on windows, so close it first + err = file.Close() + if err != nil { + return err + } + // if all okay, rename the temporary file to the desired one + err = os.Rename(file.Name(), fileName) + if err != nil { + return err + } + // read the file back to verify the rename produced the expected content + read, err := os.ReadFile(fileName) + if err != nil { + return err + } + if string(read) != string(data) { + return fmt.Errorf("failed to persist metadata") + } + return nil +} + +// downloadMetadata downloads a metadata file and returns it as bytes +func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) { + urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL) + // build urlPath + if version == "" { + urlPath = fmt.Sprintf("%s%s.json", urlPath, url.QueryEscape(roleName)) + } else { + urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.QueryEscape(roleName)) + } + return update.cfg.Fetcher.DownloadFile(urlPath, length, time.Second*15) +} + +// generateTargetFilePath generates a local path from TargetFiles +func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) { + // LocalTargetsDir can be omitted if caching is disabled + if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache { + return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"} + } + // use the URL-encoded target path as the filename + return filepath.Join(update.cfg.LocalTargetsDir, url.QueryEscape(tf.Path)), nil +} + +// loadLocalMetadata reads a local .json file and returns its bytes +func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) { + return readFile(fmt.Sprintf("%s.json", roleName)) +} + +// GetTopLevelTargets returns the top-level target files +func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles { + return update.trusted.Targets[metadata.TARGETS].Signed.Targets +} + +// GetTrustedMetadataSet returns the trusted metadata set +func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata { + return *update.trusted +}
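The URL shapes `downloadMetadata` produces differ under consistent snapshots, where the version number prefixes the role name. A standalone sketch with illustrative role and version values:

```go
package main

import (
	"fmt"
	"net/url"
)

// metadataURL mirrors the Sprintf logic in downloadMetadata above.
func metadataURL(base, role, version string) string {
	if version == "" {
		return fmt.Sprintf("%s%s.json", base, url.QueryEscape(role))
	}
	// with consistent snapshots, the version prefixes the role name
	return fmt.Sprintf("%s%s.%s.json", base, version, url.QueryEscape(role))
}

func main() {
	base := "https://example.com/metadata/" // illustrative
	fmt.Println(metadataURL(base, "timestamp", "")) // timestamp.json (never versioned)
	fmt.Println(metadataURL(base, "snapshot", "57")) // 57.snapshot.json
}
```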
+ +// UnsafeSetRefTime sets the reference time that the updater uses. +// This should only be done in tests. +// Using this function is useful when testing time-related behavior in go-tuf. +func (update *Updater) UnsafeSetRefTime(t time.Time) { + update.trusted.RefTime = t +} + +// IsWindowsPath reports whether path looks like an absolute Windows path (e.g. C:\) +func IsWindowsPath(path string) bool { + match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path) + return match +} + +// ensureTrailingSlash ensures url ends with a slash +func ensureTrailingSlash(url string) string { + if IsWindowsPath(url) { + slash := string(filepath.Separator) + if strings.HasSuffix(url, slash) { + return url + } + return url + slash + } + if strings.HasSuffix(url, "/") { + return url + } + return url + "/" +} + +// reverseSlice reverses the elements of a slice in place +func reverseSlice[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// readFile reads the content of a file and returns its bytes +func readFile(name string) ([]byte, error) { + in, err := os.Open(name) + if err != nil { + return nil, err + } + defer in.Close() + data, err := io.ReadAll(in) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index af006fc9..6a6b3e01 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -56,6 +56,11 @@ func (tr *Reader) RawBytes() []byte { } +// ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next() +func (tr *Reader) ExpectedPadding() int64 { + return tr.pad +} + // NewReader creates a new Reader reading from r. func NewReader(r io.Reader) *Reader { return &Reader{r: r, curr: &regFileReader{r, 0}} diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md deleted file mode 100644 index 9ed7d40a..00000000 --- a/vendor/github.com/xanzy/go-gitlab/README.md +++ /dev/null @@ -1,208 +0,0 @@ -# go-gitlab - -A GitLab API client enabling Go programs to interact with GitLab in a simple and uniform way - -[![Build Status](https://github.com/xanzy/go-gitlab/workflows/Lint%20and%20Test/badge.svg)](https://github.com/xanzy/go-gitlab/actions?workflow=Lint%20and%20Test) -[![Sourcegraph](https://sourcegraph.com/github.com/xanzy/go-gitlab/-/badge.svg)](https://sourcegraph.com/github.com/xanzy/go-gitlab?badge) -[![GoDoc](https://godoc.org/github.com/xanzy/go-gitlab?status.svg)](https://godoc.org/github.com/xanzy/go-gitlab) -[![Go Report Card](https://goreportcard.com/badge/github.com/xanzy/go-gitlab)](https://goreportcard.com/report/github.com/xanzy/go-gitlab) -[![Coverage](https://github.com/xanzy/go-gitlab/wiki/coverage.svg)](https://raw.githack.com/wiki/xanzy/go-gitlab/coverage.html) - -## NOTE - -Release v0.6.0 (released on 25-08-2017) no longer supports the older V3 GitLab API. If -you need V3 support, please use the `f-api-v3` branch. This release contains some backwards -incompatible changes that were needed to fully support the V4 GitLab API. - -## Coverage - -This API client package covers most of the existing GitLab API calls and is updated regularly -to add new and/or missing endpoints.
Currently, the following services are supported: - -- [x] Applications -- [x] Award Emojis -- [x] Branches -- [x] Broadcast Messages -- [x] Commits -- [x] Container Registry -- [x] Custom Attributes -- [x] Deploy Keys -- [x] Deployments -- [x] Discussions (threaded comments) -- [x] Environments -- [x] Epic Issues -- [x] Epics -- [x] Error Tracking -- [x] Events -- [x] Feature Flags -- [x] Geo Nodes -- [x] Generic Packages -- [x] GitLab CI Config Templates -- [x] Gitignores Templates -- [x] Group Access Requests -- [x] Group Issue Boards -- [x] Group Members -- [x] Group Milestones -- [x] Group Wikis -- [x] Group-Level Variables -- [x] Groups -- [x] Instance Clusters -- [x] Invites -- [x] Issue Boards -- [x] Issues -- [x] Jobs -- [x] Keys -- [x] Labels -- [x] License -- [x] Markdown -- [x] Merge Request Approvals -- [x] Merge Requests -- [x] Namespaces -- [x] Notes (comments) -- [x] Notification Settings -- [x] Open Source License Templates -- [x] Packages -- [x] Pages -- [x] Pages Domains -- [x] Personal Access Tokens -- [x] Pipeline Schedules -- [x] Pipeline Triggers -- [x] Pipelines -- [x] Plan limits -- [x] Project Access Requests -- [x] Project Badges -- [x] Project Clusters -- [x] Project Import/export -- [x] Project Members -- [x] Project Milestones -- [x] Project Repository Storage Moves -- [x] Project Snippets -- [x] Project Vulnerabilities -- [x] Project-Level Variables -- [x] Projects (including setting Webhooks) -- [x] Protected Branches -- [x] Protected Environments -- [x] Protected Tags -- [x] Repositories -- [x] Repository Files -- [x] Repository Submodules -- [x] Runners -- [x] Search -- [x] Services -- [x] Settings -- [x] Sidekiq Metrics -- [x] System Hooks -- [x] Tags -- [x] Todos -- [x] Topics -- [x] Users -- [x] Validate CI Configuration -- [x] Version -- [x] Wikis - -## Usage - -```go -import "github.com/xanzy/go-gitlab" -``` - -Construct a new GitLab client, then use the various services on the client to -access different parts of the GitLab API. For example, to list all -users: - -```go -git, err := gitlab.NewClient("yourtokengoeshere") -if err != nil { - log.Fatalf("Failed to create client: %v", err) -} -users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) -``` - -There are a few `With...` option functions that can be used to customize -the API client. For example, to set a custom base URL: - -```go -git, err := gitlab.NewClient("yourtokengoeshere", gitlab.WithBaseURL("https://git.mydomain.com/api/v4")) -if err != nil { - log.Fatalf("Failed to create client: %v", err) -} -users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) -``` - -Some API methods have optional parameters that can be passed. 
For example, -to list all projects for user "svanharmelen": - -```go -git := gitlab.NewClient("yourtokengoeshere") -opt := &gitlab.ListProjectsOptions{Search: gitlab.Ptr("svanharmelen")} -projects, _, err := git.Projects.ListProjects(opt) -``` - -### Examples - -The [examples](https://github.com/xanzy/go-gitlab/tree/master/examples) directory -contains a couple for clear examples, of which one is partially listed here as well: - -```go -package main - -import ( - "log" - - "github.com/xanzy/go-gitlab" -) - -func main() { - git, err := gitlab.NewClient("yourtokengoeshere") - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - - // Create new project - p := &gitlab.CreateProjectOptions{ - Name: gitlab.Ptr("My Project"), - Description: gitlab.Ptr("Just a test project to play with"), - MergeRequestsEnabled: gitlab.Ptr(true), - SnippetsEnabled: gitlab.Ptr(true), - Visibility: gitlab.Ptr(gitlab.PublicVisibility), - } - project, _, err := git.Projects.CreateProject(p) - if err != nil { - log.Fatal(err) - } - - // Add a new snippet - s := &gitlab.CreateProjectSnippetOptions{ - Title: gitlab.Ptr("Dummy Snippet"), - FileName: gitlab.Ptr("snippet.go"), - Content: gitlab.Ptr("package main...."), - Visibility: gitlab.Ptr(gitlab.PublicVisibility), - } - _, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s) - if err != nil { - log.Fatal(err) - } -} -``` - -For complete usage of go-gitlab, see the full [package docs](https://godoc.org/github.com/xanzy/go-gitlab). - -## ToDo - -- The biggest thing this package still needs is tests :disappointed: - -## Issues - -- If you have an issue: report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues) - -## Author - -Sander van Harmelen () - -## Contributing - -Contributions are always welcome. For more information, check out the [contributing guide](https://github.com/xanzy/go-gitlab/blob/master/CONTRIBUTING.md) - -## License - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/vendor/github.com/xanzy/go-gitlab/group_hooks.go b/vendor/github.com/xanzy/go-gitlab/group_hooks.go deleted file mode 100644 index 56f2fae6..00000000 --- a/vendor/github.com/xanzy/go-gitlab/group_hooks.go +++ /dev/null @@ -1,221 +0,0 @@ -// -// Copyright 2021, Eric Stevens -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupHook represents a GitLab group hook. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -type GroupHook struct { - ID int `json:"id"` - URL string `json:"url"` - GroupID int `json:"group_id"` - PushEvents bool `json:"push_events"` - PushEventsBranchFilter string `json:"push_events_branch_filter"` - IssuesEvents bool `json:"issues_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - TagPushEvents bool `json:"tag_push_events"` - NoteEvents bool `json:"note_events"` - JobEvents bool `json:"job_events"` - PipelineEvents bool `json:"pipeline_events"` - WikiPageEvents bool `json:"wiki_page_events"` - DeploymentEvents bool `json:"deployment_events"` - ReleasesEvents bool `json:"releases_events"` - SubGroupEvents bool `json:"subgroup_events"` - EnableSSLVerification bool `json:"enable_ssl_verification"` - AlertStatus string `json:"alert_status"` - CreatedAt *time.Time `json:"created_at"` - CustomWebhookTemplate string `json:"custom_webhook_template"` -} - -// ListGroupHooksOptions represents the available ListGroupHooks() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -type ListGroupHooksOptions ListOptions - -// ListGroupHooks gets a list of group hooks. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -func (s *GroupsService) ListGroupHooks(gid interface{}, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - var gh []*GroupHook - resp, err := s.client.Do(req, &gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// GetGroupHook gets a specific hook for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#get-group-hook -func (s *GroupsService) GetGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// AddGroupHookOptions represents the available AddGroupHook() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook -type AddGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` -} - -// AddGroupHook create a new group scoped webhook. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook -func (s *GroupsService) AddGroupHook(gid interface{}, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// EditGroupHookOptions represents the available EditGroupHook() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook -type EditGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` -} - -// EditGroupHook edits a hook for a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook -func (s *GroupsService) EditGroupHook(pid interface{}, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// DeleteGroupHook removes a hook from a group. This is an idempotent -// method and can be called multiple times. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-group-hook -func (s *GroupsService) DeleteGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go deleted file mode 100644 index 3ccb41b5..00000000 --- a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go +++ /dev/null @@ -1,119 +0,0 @@ -// -// Copyright 2023, James Hong -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupServiceAccount represents a GitLab service account user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -type GroupServiceAccount struct { - ID int `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` -} - -// CreateServiceAccount create a new service account user for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - sa := new(GroupServiceAccount) - resp, err := s.client.Do(req, sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil -} - -// CreateServiceAccountPersonalAccessTokenOptions represents the available -// CreateServiceAccountPersonalAccessToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user -type CreateServiceAccountPersonalAccessTokenOptions struct { - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// CreateServiceAccountPersonalAccessToken add a new Personal Access Token for a -// service account user for a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user -func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// RotateServiceAccountPersonalAccessToken rotates a Personal Access Token for a -// service account user for a group. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user -func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount, token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", PathEscape(group), serviceAccount, token) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/member_roles.go b/vendor/github.com/xanzy/go-gitlab/member_roles.go deleted file mode 100644 index cefb94b2..00000000 --- a/vendor/github.com/xanzy/go-gitlab/member_roles.go +++ /dev/null @@ -1,121 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" -) - -// MemberRolesService handles communication with the member roles related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html -type MemberRolesService struct { - client *Client -} - -// MemberRole represents a GitLab member role. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html -type MemberRole struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - GroupId int `json:"group_id"` - BaseAccessLevel AccessLevelValue `json:"base_access_level"` - AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` - AdminMergeRequests bool `json:"admin_merge_request,omitempty"` - AdminTerraformState bool `json:"admin_terraform_state,omitempty"` - AdminVulnerability bool `json:"admin_vulnerability,omitempty"` - ReadCode bool `json:"read_code,omitempty"` - ReadDependency bool `json:"read_dependency,omitempty"` - ReadVulnerability bool `json:"read_vulnerability,omitempty"` - AdminGroupMembers bool `json:"admin_group_member,omitempty"` - ManageProjectAccessToken bool `json:"manage_project_access_tokens,omitempty"` - ArchiveProject bool `json:"archive_project,omitempty"` - RemoveProject bool `json:"remove_project,omitempty"` - ManageGroupAccesToken bool `json:"manage_group_access_tokens,omitempty"` -} - -// ListMemberRoles gets a list of member roles for a specified group. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#list-all-member-roles-of-a-group -func (s *MemberRolesService) ListMemberRoles(gid interface{}, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MemberRole - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil -} - -// CreateMemberRoleOptions represents the available CreateMemberRole() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group -type CreateMemberRoleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` - AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` - ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` - ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` - ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` -} - -// CreateMemberRole creates a new member role for a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group -func (s *MemberRolesService) CreateMemberRole(gid interface{}, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - mr := new(MemberRole) - resp, err := s.client.Do(req, mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil -} - -// DeleteMemberRole deletes a member role from a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#remove-member-role-of-a-group -func (s *MemberRolesService) DeleteMemberRole(gid interface{}, memberRole int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/member_roles/%d", PathEscape(group), memberRole) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go deleted file mode 100644 index ac905668..00000000 --- a/vendor/github.com/xanzy/go-gitlab/settings.go +++ /dev/null @@ -1,779 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "net/http" - "time" -) - -// SettingsService handles communication with the application SettingsService -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html -type SettingsService struct { - client *Client -} - -// Settings represents the GitLab application settings. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html -// -// The available parameters have been modeled directly after the code, as the -// documentation seems to be inaccurate. -// -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/settings.rb -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/entities/application_setting.rb#L5 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/app/helpers/application_settings_helper.rb#L192 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 -type Settings struct { - ID int `json:"id"` - AbuseNotificationEmail string `json:"abuse_notification_email"` - AdminMode bool `json:"admin_mode"` - AfterSignOutPath string `json:"after_sign_out_path"` - AfterSignUpText string `json:"after_sign_up_text"` - AkismetAPIKey string `json:"akismet_api_key"` - AkismetEnabled bool `json:"akismet_enabled"` - AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` - AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` - AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` - ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` - AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` - AssetProxyEnabled bool `json:"asset_proxy_enabled"` - AssetProxyURL string `json:"asset_proxy_url"` - AssetProxySecretKey string `json:"asset_proxy_secret_key"` - AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` - AutoDevOpsDomain string `json:"auto_devops_domain"` - AutoDevOpsEnabled bool `json:"auto_devops_enabled"` - AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` - CanCreateGroup bool `json:"can_create_group"` - CheckNamespacePlan bool `json:"check_namespace_plan"` - CommitEmailHostname string `json:"commit_email_hostname"` - ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` - ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` - ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` - ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` - ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` - ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` - ContainerRegistryImportMaxRetries int 
`json:"container_registry_import_max_retries"` - ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` - ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` - ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` - ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` - CreatedAt *time.Time `json:"created_at"` - CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` - DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DSAKeyRestriction int `json:"dsa_key_restriction"` - DeactivateDormantUsers bool `json:"deactivate_dormant_users"` - DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` - DefaultBranchName string `json:"default_branch_name"` - DefaultBranchProtection int `json:"default_branch_protection"` - DefaultCiConfigPath string `json:"default_ci_config_path"` - DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` - DefaultProjectCreation int `json:"default_project_creation"` - DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` - DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - DefaultProjectsLimit int `json:"default_projects_limit"` - DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DelayedGroupDeletion bool `json:"delayed_group_deletion"` - DelayedProjectDeletion bool `json:"delayed_project_deletion"` - DeleteInactiveProjects bool `json:"delete_inactive_projects"` - DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` - DiffMaxFiles int `json:"diff_max_files"` - DiffMaxLines int `json:"diff_max_lines"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` - DisableFeedToken bool `json:"disable_feed_token"` - DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` - DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` - DomainAllowlist []string `json:"domain_allowlist"` - DomainDenylist []string `json:"domain_denylist"` - DomainDenylistEnabled bool `json:"domain_denylist_enabled"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` - EKSAccessKeyID string `json:"eks_access_key_id"` - EKSAccountID string `json:"eks_account_id"` - EKSIntegrationEnabled bool `json:"eks_integration_enabled"` - EKSSecretAccessKey string `json:"eks_secret_access_key"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` - ElasticsearchAWS bool `json:"elasticsearch_aws"` - ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` - ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` - ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` - ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` - ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` - ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` - ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` - ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` - ElasticsearchIndexedFieldLengthLimit int 
`json:"elasticsearch_indexed_field_length_limit"` - ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` - ElasticsearchIndexing bool `json:"elasticsearch_indexing"` - ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` - ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` - ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` - ElasticsearchPassword string `json:"elasticsearch_password"` - ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` - ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` - ElasticsearchReplicas int `json:"elasticsearch_replicas"` - ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchShards int `json:"elasticsearch_shards"` - ElasticsearchURL []string `json:"elasticsearch_url"` - ElasticsearchUsername string `json:"elasticsearch_username"` - EmailAdditionalText string `json:"email_additional_text"` - EmailAuthorInBody bool `json:"email_author_in_body"` - EmailRestrictions string `json:"email_restrictions"` - EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` - EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` - EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` - EnforcePATExpiration bool `json:"enforce_pat_expiration"` - EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` - EnforceTerms bool `json:"enforce_terms"` - ExternalAuthClientCert string `json:"external_auth_client_cert"` - ExternalAuthClientKey string `json:"external_auth_client_key"` - ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` - ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` - ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` - ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` - ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` - ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` - ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` - ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` - FlocEnabled bool `json:"floc_enabled"` - GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` - GitpodEnabled bool `json:"gitpod_enabled"` - GitpodURL string `json:"gitpod_url"` - GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` - GrafanaEnabled bool `json:"grafana_enabled"` - GrafanaURL string `json:"grafana_url"` - GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` - GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` - GroupRunnerTokenExpirationInterval int 
`json:"group_runner_token_expiration_interval"` - HTMLEmailsEnabled bool `json:"html_emails_enabled"` - HashedStorageEnabled bool `json:"hashed_storage_enabled"` - HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` - HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` - HelpPageSupportURL string `json:"help_page_support_url"` - HelpPageText string `json:"help_page_text"` - HelpText string `json:"help_text"` - HideThirdPartyOffers bool `json:"hide_third_party_offers"` - HomePageURL string `json:"home_page_url"` - HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` - HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` - HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` - ImportSources []string `json:"import_sources"` - InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` - InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` - InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` - InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` - InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` - IssuesCreateLimit int `json:"issues_create_limit"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - KrokiEnabled bool `json:"kroki_enabled"` - KrokiFormats map[string]bool `json:"kroki_formats"` - KrokiURL string `json:"kroki_url"` - LocalMarkdownVersion int `json:"local_markdown_version"` - LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` - LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` - MailgunEventsEnabled bool `json:"mailgun_events_enabled"` - MailgunSigningKey string `json:"mailgun_signing_key"` - MaintenanceMode bool `json:"maintenance_mode"` - MaintenanceModeMessage string `json:"maintenance_mode_message"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxExportSize int `json:"max_export_size"` - MaxImportSize int `json:"max_import_size"` - MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` - MaxPagesSize int `json:"max_pages_size"` - MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` - MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` - MaxYAMLDepth int `json:"max_yaml_depth"` - MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MinimumPasswordLength int `json:"minimum_password_length"` - MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` - NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` - NotesCreateLimit int `json:"notes_create_limit"` - NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` - OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` - OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - 
PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` - PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` - PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` - PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` - PasswordNumberRequired bool `json:"password_number_required"` - PasswordSymbolRequired bool `json:"password_symbol_required"` - PasswordUppercaseRequired bool `json:"password_uppercase_required"` - PasswordLowercaseRequired bool `json:"password_lowercase_required"` - PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` - PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` - PerformanceBarEnabled bool `json:"performance_bar_enabled"` - PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` - PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` - PlantumlEnabled bool `json:"plantuml_enabled"` - PlantumlURL string `json:"plantuml_url"` - PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` - PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` - PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` - ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` - PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` - ProtectedCIVariables bool `json:"protected_ci_variables"` - PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` - PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` - RSAKeyRestriction int `json:"rsa_key_restriction"` - RateLimitingResponseText string `json:"rate_limiting_response_text"` - RawBlobRequestLimit int `json:"raw_blob_request_limit"` - RecaptchaEnabled bool `json:"recaptcha_enabled"` - RecaptchaPrivateKey string `json:"recaptcha_private_key"` - RecaptchaSiteKey string `json:"recaptcha_site_key"` - ReceiveMaxInputSize int `json:"receive_max_input_size"` - RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` - RepositoryStorages []string `json:"repository_storages"` - RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` - RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` - RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` - RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - SearchRateLimit int `json:"search_rate_limit"` - SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` - SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` - SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` - SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` - 
-	SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"`
-	SendUserConfirmationEmail bool `json:"send_user_confirmation_email"`
-	SentryClientsideDSN string `json:"sentry_clientside_dsn"`
-	SentryDSN string `json:"sentry_dsn"`
-	SentryEnabled bool `json:"sentry_enabled"`
-	SentryEnvironment string `json:"sentry_environment"`
-	SessionExpireDelay int `json:"session_expire_delay"`
-	SharedRunnersEnabled bool `json:"shared_runners_enabled"`
-	SharedRunnersMinutes int `json:"shared_runners_minutes"`
-	SharedRunnersText string `json:"shared_runners_text"`
-	SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"`
-	SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"`
-	SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"`
-	SignInText string `json:"sign_in_text"`
-	SignupEnabled bool `json:"signup_enabled"`
-	SlackAppEnabled bool `json:"slack_app_enabled"`
-	SlackAppID string `json:"slack_app_id"`
-	SlackAppSecret string `json:"slack_app_secret"`
-	SlackAppSigningSecret string `json:"slack_app_signing_secret"`
-	SlackAppVerificationToken string `json:"slack_app_verification_token"`
-	SnippetSizeLimit int `json:"snippet_size_limit"`
-	SnowplowAppID string `json:"snowplow_app_id"`
-	SnowplowCollectorHostname string `json:"snowplow_collector_hostname"`
-	SnowplowCookieDomain string `json:"snowplow_cookie_domain"`
-	SnowplowEnabled bool `json:"snowplow_enabled"`
-	SourcegraphEnabled bool `json:"sourcegraph_enabled"`
-	SourcegraphPublicOnly bool `json:"sourcegraph_public_only"`
-	SourcegraphURL string `json:"sourcegraph_url"`
-	SpamCheckAPIKey string `json:"spam_check_api_key"`
-	SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"`
-	SpamCheckEndpointURL string `json:"spam_check_endpoint_url"`
-	SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"`
-	TerminalMaxSessionTime int `json:"terminal_max_session_time"`
-	Terms string `json:"terms"`
-	ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"`
-	ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"`
-	ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"`
-	ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"`
-	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"`
-	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"`
-	ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"`
-	ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"`
-	ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"`
-	ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"`
-	ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"`
-	ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"`
-	ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"`
-	ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"`
-	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"`
-	ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"`
-	ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"`
-	ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"`
-	ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"`
-	ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"`
-	ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"`
-	ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"`
-	ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"`
-	ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"`
-	ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"`
-	ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"`
-	ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"`
-	ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"`
-	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"`
-	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"`
-	ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"`
-	ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"`
-	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"`
-	ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"`
-	ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"`
-	ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"`
-	ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"`
-	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"`
-	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"`
-	ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"`
-	ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"`
-	ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"`
-	TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"`
-	TwoFactorGracePeriod int `json:"two_factor_grace_period"`
-	UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"`
-	UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"`
-	UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"`
-	UpdatedAt *time.Time `json:"updated_at"`
-	UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"`
-	UsagePingEnabled bool `json:"usage_ping_enabled"`
-	UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"`
-	UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"`
-	UserDefaultExternal bool `json:"user_default_external"`
-	UserDefaultInternalRegex string `json:"user_default_internal_regex"`
-	UserOauthApplications bool `json:"user_oauth_applications"`
-	UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"`
-	UsersGetByIDLimit int `json:"users_get_by_id_limit"`
-	UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"`
-	VersionCheckEnabled bool `json:"version_check_enabled"`
-	WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"`
-	WhatsNewVariant string `json:"whats_new_variant"`
-	WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"`
-
-	// Deprecated: Use AbuseNotificationEmail instead.
-	AdminNotificationEmail string `json:"admin_notification_email"`
-	// Deprecated: Use AllowLocalRequestsFromWebHooksAndServices instead.
-	AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"`
-	// Deprecated: Use AssetProxyAllowlist instead.
-	AssetProxyWhitelist []string `json:"asset_proxy_whitelist"`
-	// Deprecated: Use ThrottleUnauthenticatedWebEnabled or ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3)
-	ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"`
-	// Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. (Deprecated in GitLab 14.3)
-	ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"`
-	// Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3)
-	ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"`
-	// Deprecated: Replaced by SearchRateLimit in GitLab 14.9 (removed in 15.0).
-	UserEmailLookupLimit int `json:"user_email_lookup_limit"`
-}
-
-func (s Settings) String() string {
-	return Stringify(s)
-}
-
-// GetSettings gets the current application settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/settings.html#get-current-application-settings
-func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "application/settings", nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	as := new(Settings)
-	resp, err := s.client.Do(req, as)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return as, resp, nil
-}
-
-// UpdateSettingsOptions represents the available UpdateSettings() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/settings.html#change-application-settings
-type UpdateSettingsOptions struct {
-	AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"`
-	AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"`
-	AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"`
-	AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
-	AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"`
-	AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"`
-	AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"`
-	AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"`
-	AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"`
-	AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"`
-	AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"`
-	ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"`
-	AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"`
-	AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"`
-	AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"`
-	AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"`
-	AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"`
-	AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"`
-	AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"`
-	AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
-	AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"`
-	CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"`
-	CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"`
-	CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"`
-	ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"`
-	ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"`
-	ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"`
-	ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"`
-	ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"`
-	ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"`
-	ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"`
-	ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"`
-	ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"`
-	ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"`
-	ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"`
-	ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"`
-	CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"`
-	DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"`
-	DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"`
-	DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"`
-	DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"`
-	DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"`
-	DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
-	DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"`
-	DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"`
-	DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"`
-	DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"`
-	DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"`
-	DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
-	DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"`
-	DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"`
-	DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"`
-	DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"`
-	DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"`
-	DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"`
-	DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"`
-	DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"`
-	DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"`
-	DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"`
-	DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"`
-	DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"`
-	DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"`
-	DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"`
-	ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"`
-	ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"`
-	EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"`
-	EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"`
-	EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"`
-	EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"`
-	Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"`
-	Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"`
-	ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"`
-	ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"`
-	ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"`
-	ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"`
-	ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"`
-	ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"`
-	ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"`
-	ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"`
-	ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"`
-	ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"`
-	ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"`
-	ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"`
-	ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"`
-	ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"`
-	ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"`
-	ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"`
-	ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"`
-	ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"`
-	ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"`
-	ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"`
-	ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"`
-	ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"`
-	ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"`
-	ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"`
-	EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"`
-	EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"`
-	EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"`
-	EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"`
-	EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"`
-	EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"`
-	EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"`
-	EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"`
-	EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"`
-	ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"`
-	ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"`
-	ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"`
-	ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"`
-	ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"`
-	ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"`
-	ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"`
-	ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"`
-	ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"`
-	ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"`
-	FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"`
-	FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"`
-	FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"`
-	GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"`
-	GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"`
-	GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"`
-	GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"`
-	GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"`
-	GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"`
-	GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"`
-	GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"`
-	GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"`
-	GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"`
-	GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"`
-	GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"`
-	GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"`
-	GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"`
-	GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"`
-	GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"`
-	GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"`
-	HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"`
-	HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"`
-	HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"`
-	HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"`
-	HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"`
-	HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"`
-	HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"`
-	HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"`
-	HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"`
-	HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"`
-	HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"`
-	HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"`
-	HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"`
-	HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"`
-	HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"`
-	ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"`
-	InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"`
-	InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"`
-	InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"`
-	InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"`
-	InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"`
-	IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"`
-	KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"`
-	KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"`
-	KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"`
-	KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"`
-	LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"`
-	LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"`
-	LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"`
-	MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"`
-	MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"`
-	MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"`
-	MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"`
-	MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"`
-	MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"`
-	MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"`
-	MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"`
-	MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"`
-	MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"`
-	MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"`
-	MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"`
-	MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"`
-	MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"`
-	MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"`
-	MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"`
-	MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"`
-	MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"`
-	MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"`
-	MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"`
-	MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"`
-	NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"`
-	NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"`
-	NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"`
-	OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"`
-	OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"`
-	PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"`
-	PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"`
-	PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"`
-	PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"`
-	PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"`
-	PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"`
-	PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"`
-	PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"`
-	PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"`
-	PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"`
-	PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"`
-	PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"`
-	PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"`
-	PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"`
-	PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"`
-	PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"`
-	PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"`
-	PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"`
-	ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"`
-	ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"`
-	ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"`
-	ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"`
-	ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"`
-	PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"`
-	ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"`
-	PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"`
-	PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"`
-	PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"`
-	PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"`
-	RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"`
-	RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"`
-	RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"`
-	RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"`
-	RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"`
-	RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"`
-	ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"`
-	RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"`
-	RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"`
-	RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"`
-	RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"`
-	RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"`
-	RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
-	RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
-	RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"`
-	SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"`
-	SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"`
-	SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"`
-	SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"`
-	SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"`
-	SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"`
-	SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"`
-	SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"`
-	SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"`
-	SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"`
-	SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"`
-	SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
-	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
-	SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"`
-	SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"`
-	SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"`
-	SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"`
-	SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"`
-	SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
-	SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
-	SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"`
-	SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"`
-	SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"`
-	SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"`
-	SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"`
-	SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"`
-	SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"`
-	SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"`
-	SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"`
-	SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"`
-	SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"`
-	SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"`
-	SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"`
-	SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"`
-	SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"`
-	SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"`
-	SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"`
-	TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"`
-	Terms *string `url:"terms,omitempty" json:"terms,omitempty"`
-	ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"`
-	ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"`
-	ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"`
-	ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"`
-	ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"`
-	ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"`
-	ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"`
-	ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"`
-	ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"`
-	ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"`
-	ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"`
-	ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"`
-	ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"`
-	ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"`
-	ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"`
-	ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"`
-	ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"`
-	ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"`
-	ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"`
-	TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"`
-	TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"`
-	UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"`
-	UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"`
-	UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"`
-	UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"`
-	UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"`
-	UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"`
-	UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"`
-	UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"`
-	UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"`
-	UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"`
-	UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"`
-	UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"`
-	UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"`
-	UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"`
-	VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"`
-	WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"`
-	WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"`
-	WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"`
-}
-
-// UpdateSettings updates the application settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/settings.html#change-application-settings
-func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...RequestOptionFunc) (*Settings, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodPut, "application/settings", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	as := new(Settings)
-	resp, err := s.client.Do(req, as)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return as, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/.gitignore b/vendor/gitlab.com/gitlab-org/api/client-go/.gitignore
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/.gitignore
rename to vendor/gitlab.com/gitlab-org/api/client-go/.gitignore
diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml b/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml
new file mode 100644
index 00000000..dfbfd9f8
--- /dev/null
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/.gitlab-ci.yml
@@ -0,0 +1,147 @@
+workflow:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $CI_COMMIT_TAG
+    - if: $CI_COMMIT_REF_PROTECTED == "true"
+
+include:
+  - component: ${CI_SERVER_FQDN}/gitlab-org/components/danger-review/danger-review@2.0.0
+    inputs:
+      job_stage: lint
+      job_allow_failure: true
+
+stages:
+  - lint
+  - test
+  - deploy
+
+.go:versions:
+  parallel:
+    matrix:
+      - GOLANG_IMAGE_VERSION:
+          - '1.22'
+          - '1.23'
+          - '1.24'
+
+.go:base:
+  # From: https://docs.gitlab.com/ee/ci/caching/#cache-go-dependencies
+  variables:
+    GOPATH: $CI_PROJECT_DIR/.go
+    GOLANGCI_LINT_CACHE: $CI_PROJECT_DIR/.golangci-lint
+  before_script:
+    - mkdir -p "${GOPATH}" "${GOLANGCI_LINT_CACHE}"
+  cache:
+    paths:
+      - $GOPATH/pkg/mod/
+      - $GOLANGCI_LINT_CACHE/
+    key:
+      files:
+        - go.sum
+  # We want to speed up CI a bit.
+  # Community contributors are recommended to use the Community fork,
+  # which has access to these runners.
+  # For other forks in free-tier namespaces this might fail,
+  # which is a good reminder to use the Community fork and not
+  # to accidentally burn personal compute minutes.
+  tags:
+    - saas-linux-large-amd64
+  # We only need to run Go-related jobs when actual Go files changed
+  # or when running either on the default branch or for a tag.
+  rules:
+    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
+    - if: $CI_COMMIT_TAG
+    - changes:
+        - '**/*.go'
+        - testdata/**
+        - go.mod
+        - go.sum
+        - .gitlab-ci.yml
+
+golangci-lint:
+  extends:
+    - .go:base
+  stage: lint
+  needs: []
+  variables:
+    REPORT_FILENAME: 'gl-code-quality-report.json'
+  image: golangci/golangci-lint:v1.64.5
+  script:
+    - golangci-lint run --print-issued-lines=false --out-format code-climate:$REPORT_FILENAME,line-number
+  artifacts:
+    reports:
+      codequality: $REPORT_FILENAME
+    paths: [$REPORT_FILENAME]
+    when: always
+
+tests:unit:
+  extends:
+    - .go:base
+    - .go:versions
+  stage: test
+  needs: []
+  image: golang:$GOLANG_IMAGE_VERSION
+  variables:
+    # configure tooling versions
+    GOTESTSUM_VERSION: 'v1.12.0'
+    GOCOVER_COBERTURA_VERSION: 'v1.2.1-0.20240107185409-0818f3538137'
+
+    # configure artifact files
+    JUNIT_FILENAME: tests.xml
+    COVERPROFILE_FILENAME: coverage.out
+    COVERPROFILE_XML_FILENAME: coverage.xml
+  script:
+    - go run gotest.tools/gotestsum@${GOTESTSUM_VERSION} --format=standard-quiet --junitfile=$JUNIT_FILENAME -- -race -coverprofile=$COVERPROFILE_FILENAME -covermode=atomic ./...
+    - go run github.com/boumenot/gocover-cobertura@${GOCOVER_COBERTURA_VERSION} < $COVERPROFILE_FILENAME > $COVERPROFILE_XML_FILENAME
+    - go tool cover -func $COVERPROFILE_FILENAME
+  coverage: '/total:.+\(statements\).+\d+\.\d+/'
+  artifacts:
+    paths:
+      - $JUNIT_FILENAME
+      - $COVERPROFILE_XML_FILENAME
+    reports:
+      junit: $JUNIT_FILENAME
+      coverage_report:
+        path: $COVERPROFILE_XML_FILENAME
+        coverage_format: cobertura
+    when: always
+
+generate-release-notes:
+  stage: deploy
+  needs: []
+  image: alpine:3.21.2
+  before_script:
+    - apk add --update jq curl git
+  script:
+    - |
+      if [ -z "$CI_COMMIT_TAG" ]; then
+        last_stable_version_sha="$(git tag | grep -E '^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$' | sort -Vr | head -n1)"
+        version="${last_stable_version_sha}+${CI_COMMIT_SHA}"
+      else
+        version="$CI_COMMIT_TAG"
+      fi
+      urlencoded_version="$(jq -rn --arg x "${version}" '$x|@uri')"
+    - echo "Generating release notes for ${version} (urlencoded=${urlencoded_version}) ..."
+    - 'curl --fail-with-body --header "JOB-TOKEN: $CI_JOB_TOKEN" "$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=${urlencoded_version}" | jq -r .notes > release-notes.md'
+    - cat release-notes.md
+  artifacts:
+    paths:
+      - release-notes.md
+
+release:
+  stage: deploy
+  rules:
+    - if: $CI_COMMIT_TAG
+  needs:
+    - golangci-lint
+    - tests:unit
+    - job: generate-release-notes
+      artifacts: true
+  image: registry.gitlab.com/gitlab-org/release-cli:latest
+  script:
+    - echo "Create release for $CI_COMMIT_TAG"
+  release:
+    tag_name: '$CI_COMMIT_TAG'
+    tag_message: 'Version $CI_COMMIT_TAG'
+    name: '$CI_COMMIT_TAG'
+    description: release-notes.md
diff --git a/vendor/github.com/xanzy/go-gitlab/.golangci.yml b/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml
similarity index 88%
rename from vendor/github.com/xanzy/go-gitlab/.golangci.yml
rename to vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml
index 2d74c0df..49c2c7aa 100644
--- a/vendor/github.com/xanzy/go-gitlab/.golangci.yml
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/.golangci.yml
@@ -10,7 +10,8 @@ run:
 
 # Output configuration options
 output:
-  format: line-number
+  formats:
+    - format: line-number
 
 # All available settings of specific linters
 linters-settings:
@@ -18,21 +19,24 @@ linters-settings:
   misspell:
     locale: US
     ignore-words:
      - noteable
+  revive:
+    enable-all-rules: false
+    rules:
+      - name: deep-exit
 
 linters:
   enable:
     - asciicheck
     - dogsled
     - errorlint
-    - exportloopref
     - goconst
     - gosimple
    - govet
     - ineffassign
-    - megacheck
     - misspell
     - nakedret
     - nolintlint
+    - revive
     - staticcheck
     - typecheck
     - unconvert
diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions b/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions
new file mode 100644
index 00000000..3c1fdba6
--- /dev/null
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/.tool-versions
@@ -0,0 +1 @@
+golang 1.22.10
diff --git a/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md b/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md
similarity index 53%
rename from vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md
rename to vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md
index 32bd8227..76f086c3 100644
--- a/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/CONTRIBUTING.md
@@ -4,21 +4,31 @@ We want to make contributing to this project as easy as possible.
 
 ## Reporting Issues
 
-If you have an issue, please report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues).
+If you have an issue, please report it on the
+[issue tracker](https://gitlab.com/gitlab-org/api/client-go/-/issues).
 
-When you are up for writing a PR to solve the issue you encountered, it's not
-needed to first open a separate issue. In that case only opening a PR with a
+When you are up for writing an MR to solve the issue you encountered, you don't
+need to first open a separate issue. In that case, opening an MR with a
 description of the issue you are trying to solve is just fine.
 
 ## Contributing Code
 
-Pull requests are always welcome. When in doubt if your contribution fits within
+Merge requests are always welcome. When in doubt whether your contribution fits within
 the rest of the project, feel free to first open an issue to discuss your idea.
 
 This is not needed when fixing a bug or adding an enhancement, as long as the
 enhancement you are trying to add can be found in the public GitLab API docs as
 this project only supports what is in the public API docs.
+### Use community fork to contribute + +To contribute to this project, we recommend that you use the +[community fork](https://gitlab.com/gitlab-community/api/client-go). +Have a look at the +[community fork README](https://gitlab.com/gitlab-community#gitlab-community-forks) +to learn more about what it is and why you should prefer it over +creating your own fork to contribute. + ## Coding style We try to follow the Go best practices, where it makes sense, and use @@ -26,20 +36,20 @@ We try to follow the Go best practices, where it makes sense, and use As a general rule of thumb we prefer to keep line width for comments below 80 chars and for code (where possible and sensible) below 100 chars. -Before making a PR, please look at the rest this package and try to make sure +Before making a MR, please look at the rest of this package and try to make sure your contribution is consistent with the rest of the coding style. -New struct field or methods should be placed (as much as possible) in the same +New `struct` fields or methods should be placed (as much as possible) in the same order as the ordering used in the public API docs. The idea is that this makes it easier to find things. -### Setting up your local development environment to Contribute to `go-gitlab` +### Setting up your local development environment to contribute -1. [Fork](https://github.com/xanzy/go-gitlab/fork), then clone the repository. +1. [Fork](https://gitlab.com/gitlab-org/api/client-go), then clone the repository. ```sh - git clone https://github.com//go-gitlab.git + git clone https://gitlab.com//client-go.git # or via ssh - git clone git@github.com:/go-gitlab.git + git clone git@gitlab.com:/client-go.git ``` 1. Install dependencies: ```sh @@ -50,4 +60,4 @@ easier to find things. ```sh make test && make fmt ``` -1. Open up your pull request +1. Open up your merge request diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/Dangerfile b/vendor/gitlab.com/gitlab-org/api/client-go/Dangerfile new file mode 100644 index 00000000..13606f27 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/Dangerfile @@ -0,0 +1,11 @@ +require 'gitlab-dangerfiles' + +# see https://docs.gitlab.com/ee/development/dangerbot.html#enable-danger-on-a-project +# see https://gitlab.com/gitlab-org/ruby/gems/gitlab-dangerfiles +Gitlab::Dangerfiles.for_project(self, 'gitlab-api-client-go') do |dangerfiles| + # Import all plugins from the gem + dangerfiles.import_plugins + + # Import a defined set of danger rules + dangerfiles.import_dangerfiles(only: %w[simple_roulette changelog metadata type_label z_add_labels z_retry_link]) +end diff --git a/vendor/github.com/xanzy/go-gitlab/LICENSE b/vendor/gitlab.com/gitlab-org/api/client-go/LICENSE similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/LICENSE rename to vendor/gitlab.com/gitlab-org/api/client-go/LICENSE diff --git a/vendor/github.com/xanzy/go-gitlab/Makefile b/vendor/gitlab.com/gitlab-org/api/client-go/Makefile similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/Makefile rename to vendor/gitlab.com/gitlab-org/api/client-go/Makefile diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/README.md b/vendor/gitlab.com/gitlab-org/api/client-go/README.md new file mode 100644 index 00000000..23c554ab --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/README.md @@ -0,0 +1,113 @@ +# GitLab client-go (formerly `github.com/xanzy/go-gitlab`) + +A GitLab API client enabling Go programs to interact with GitLab in a simple and uniform way.
+ +## Usage + +```go +import "gitlab.com/gitlab-org/api/client-go" +``` + +Construct a new GitLab client, then use the various services on the client to +access different parts of the GitLab API. For example, to list all +users: + +```go +git, err := gitlab.NewClient("yourtokengoeshere") +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) +``` + +There are a few `With...` option functions that can be used to customize +the API client. For example, to set a custom base URL: + +```go +git, err := gitlab.NewClient("yourtokengoeshere", gitlab.WithBaseURL("https://git.mydomain.com/api/v4")) +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) +``` + +Some API methods have optional parameters that can be passed. For example, +to list all projects for user "svanharmelen": + +```go +git, err := gitlab.NewClient("yourtokengoeshere") +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +opt := &gitlab.ListProjectsOptions{Search: gitlab.Ptr("svanharmelen")} +projects, _, err := git.Projects.ListProjects(opt) +``` + +### Examples + +The [examples](/examples) directory +contains a couple of clear examples, one of which is partially listed here as well: + +```go +package main + +import ( + "log" + + "gitlab.com/gitlab-org/api/client-go" +) + +func main() { + git, err := gitlab.NewClient("yourtokengoeshere") + if err != nil { + log.Fatalf("Failed to create client: %v", err) + } + + // Create new project + p := &gitlab.CreateProjectOptions{ + Name: gitlab.Ptr("My Project"), + Description: gitlab.Ptr("Just a test project to play with"), + MergeRequestsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), + SnippetsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), + Visibility: gitlab.Ptr(gitlab.PublicVisibility), + } + project, _, err := git.Projects.CreateProject(p) + if err != nil { + log.Fatal(err) + } + + // Add a new snippet + s := &gitlab.CreateProjectSnippetOptions{ + Title: gitlab.Ptr("Dummy Snippet"), + FileName: gitlab.Ptr("snippet.go"), + Content: gitlab.Ptr("package main...."), + Visibility: gitlab.Ptr(gitlab.PublicVisibility), + } + _, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s) + if err != nil { + log.Fatal(err) + } +} +``` + +For complete usage of go-gitlab, see the full [package docs](https://godoc.org/gitlab.com/gitlab-org/api/client-go). + +## Contributing + +Contributions are always welcome. For more information, check out the +[contributing guide](/CONTRIBUTING.md). + +## Maintenance + +This is a community-maintained project. If you have a paid GitLab subscription, +please note that this project is not packaged as a part of GitLab, and falls outside +of the scope of support. + +For more information, see GitLab's +[Statement of Support](https://about.gitlab.com/support/statement-of-support.html). +Please open an issue in this project's issue tracker and someone from the community +will respond as soon as they are available to help you.
+ +### Known GitLab Projects using this package + +- [GitLab Terraform Provider](https://gitlab.com/gitlab-org/terraform-provider-gitlab) + maintained by the community with support from ~"group::environments" +- [GitLab CLI (`glab`)](https://gitlab.com/gitlab-org/cli) + maintained by ~"group::code review" diff --git a/vendor/github.com/xanzy/go-gitlab/access_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/access_requests.go rename to vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go diff --git a/vendor/github.com/xanzy/go-gitlab/appearance.go b/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go similarity index 96% rename from vendor/github.com/xanzy/go-gitlab/appearance.go rename to vendor/gitlab.com/gitlab-org/api/client-go/appearance.go index f21893c0..8597fa9b 100644 --- a/vendor/github.com/xanzy/go-gitlab/appearance.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/appearance.go @@ -38,6 +38,7 @@ type Appearance struct { Logo string `json:"logo"` HeaderLogo string `json:"header_logo"` Favicon string `json:"favicon"` + MemberGuidelines string `json:"member_guidelines"` NewProjectGuidelines string `json:"new_project_guidelines"` ProfileImageGuidelines string `json:"profile_image_guidelines"` HeaderMessage string `json:"header_message"` @@ -80,6 +81,7 @@ type ChangeAppearanceOptions struct { Logo *string `url:"logo,omitempty" json:"logo,omitempty"` HeaderLogo *string `url:"header_logo,omitempty" json:"header_logo,omitempty"` Favicon *string `url:"favicon,omitempty" json:"favicon,omitempty"` + MemberGuidelines *string `url:"member_guidelines,omitempty" json:"member_guidelines,omitempty"` NewProjectGuidelines *string `url:"new_project_guidelines,omitempty" json:"new_project_guidelines,omitempty"` ProfileImageGuidelines *string `url:"profile_image_guidelines,omitempty" json:"profile_image_guidelines,omitempty"` HeaderMessage *string `url:"header_message,omitempty" json:"header_message,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/applications.go b/vendor/gitlab.com/gitlab-org/api/client-go/applications.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/applications.go rename to vendor/gitlab.com/gitlab-org/api/client-go/applications.go diff --git a/vendor/github.com/xanzy/go-gitlab/audit_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go similarity index 98% rename from vendor/github.com/xanzy/go-gitlab/audit_events.go rename to vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go index de312e56..f51415c7 100644 --- a/vendor/github.com/xanzy/go-gitlab/audit_events.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/audit_events.go @@ -14,6 +14,7 @@ type AuditEvent struct { AuthorID int `json:"author_id"` EntityID int `json:"entity_id"` EntityType string `json:"entity_type"` + EventName string `json:"event_name"` Details AuditEventDetails `json:"details"` CreatedAt *time.Time `json:"created_at"` EventType string `json:"event_type"` @@ -42,6 +43,7 @@ type AuditEventDetails struct { IPAddress string `json:"ip_address"` EntityPath string `json:"entity_path"` FailedLogin string `json:"failed_login"` + EventName string `json:"event_name"` } // AuditEventsService handles communication with the project/group/instance diff --git a/vendor/github.com/xanzy/go-gitlab/avatar.go b/vendor/gitlab.com/gitlab-org/api/client-go/avatar.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/avatar.go rename to 
vendor/gitlab.com/gitlab-org/api/client-go/avatar.go diff --git a/vendor/github.com/xanzy/go-gitlab/award_emojis.go b/vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/award_emojis.go rename to vendor/gitlab.com/gitlab-org/api/client-go/award_emojis.go diff --git a/vendor/github.com/xanzy/go-gitlab/boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/boards.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/boards.go rename to vendor/gitlab.com/gitlab-org/api/client-go/boards.go diff --git a/vendor/github.com/xanzy/go-gitlab/branches.go b/vendor/gitlab.com/gitlab-org/api/client-go/branches.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/branches.go rename to vendor/gitlab.com/gitlab-org/api/client-go/branches.go diff --git a/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go b/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go similarity index 96% rename from vendor/github.com/xanzy/go-gitlab/broadcast_messages.go rename to vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go index 3d0c61d9..2fbe9924 100644 --- a/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/broadcast_messages.go @@ -30,7 +30,7 @@ type BroadcastMessagesService struct { client *Client } -// BroadcastMessage represents a GitLab issue board. +// BroadcastMessage represents a GitLab broadcast message. // // GitLab API docs: // https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages @@ -45,6 +45,7 @@ type BroadcastMessage struct { TargetPath string `json:"target_path"` BroadcastType string `json:"broadcast_type"` Dismissable bool `json:"dismissable"` + Theme string `json:"theme"` // Deprecated: This parameter was removed in GitLab 15.6. Color string `json:"color"` @@ -111,6 +112,7 @@ type CreateBroadcastMessageOptions struct { TargetPath *string `url:"target_path,omitempty" json:"target_path,omitempty"` BroadcastType *string `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"` Dismissable *bool `url:"dismissable,omitempty" json:"dismissable,omitempty"` + Theme *string `url:"theme,omitempty" json:"theme,omitempty"` // Deprecated: This parameter was removed in GitLab 15.6. Color *string `url:"color,omitempty" json:"color,omitempty"` @@ -149,6 +151,7 @@ type UpdateBroadcastMessageOptions struct { TargetPath *string `url:"target_path,omitempty" json:"target_path,omitempty"` BroadcastType *string `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"` Dismissable *bool `url:"dismissable,omitempty" json:"dismissable,omitempty"` + Theme *string `url:"theme,omitempty" json:"theme,omitempty"` // Deprecated: This parameter was removed in GitLab 15.6. Color *string `url:"color,omitempty" json:"color,omitempty"` diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go b/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go new file mode 100644 index 00000000..4786135e --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/bulk_imports.go @@ -0,0 +1,72 @@ +package gitlab + +import ( + "net/http" + "time" +) + +// BulkImportsService handles communication with GitLab's direct transfer API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html +type BulkImportsService struct { + client *Client +} + +// BulkImportStartMigrationConfiguration represents the available configuration options to start a migration. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html#start-a-new-group-or-project-migration +type BulkImportStartMigrationConfiguration struct { + URL *string `json:"url,omitempty"` + AccessToken *string `json:"access_token,omitempty"` +} + +// BulkImportStartMigrationEntity represents the available entity options to start a migration. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html#start-a-new-group-or-project-migration +type BulkImportStartMigrationEntity struct { + SourceType *string `json:"source_type,omitempty"` + SourceFullPath *string `json:"source_full_path,omitempty"` + DestinationSlug *string `json:"destination_slug,omitempty"` + DestinationNamespace *string `json:"destination_namespace,omitempty"` + MigrateProjects *bool `json:"migrate_projects,omitempty"` + MigrateMemberships *bool `json:"migrate_memberships,omitempty"` +} + +// BulkImportStartMigrationOptions represents the available start migration options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html#start-a-new-group-or-project-migration +type BulkImportStartMigrationOptions struct { + Configuration *BulkImportStartMigrationConfiguration `json:"configuration,omitempty"` + Entities []BulkImportStartMigrationEntity `json:"entities,omitempty"` +} + +// BulkImportStartMigrationResponse represents the start migration response. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html#start-a-new-group-or-project-migration +type BulkImportStartMigrationResponse struct { + ID int `json:"id"` + Status string `json:"status"` + SourceType string `json:"source_type"` + SourceURL string `json:"source_url"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + HasFailures bool `json:"has_failures"` +} + +// StartMigration starts a migration. 
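+//
+// A minimal usage sketch (an illustrative addition, not part of the upstream
+// file; it assumes an already configured *gitlab.Client named git, and the
+// URL, token, and paths are hypothetical):
+//
+//	migration, _, err := git.BulkImports.StartMigration(&gitlab.BulkImportStartMigrationOptions{
+//		Configuration: &gitlab.BulkImportStartMigrationConfiguration{
+//			URL:         gitlab.Ptr("https://source.gitlab.example.com"),
+//			AccessToken: gitlab.Ptr("source-instance-token"),
+//		},
+//		Entities: []gitlab.BulkImportStartMigrationEntity{{
+//			SourceType:           gitlab.Ptr("group_entity"),
+//			SourceFullPath:       gitlab.Ptr("source/group"),
+//			DestinationSlug:      gitlab.Ptr("imported-group"),
+//			DestinationNamespace: gitlab.Ptr("destination/namespace"),
+//		}},
+//	})
+//	if err != nil { /* handle error */ }
+//	fmt.Printf("migration %d: %s\n", migration.ID, migration.Status)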
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/bulk_imports.html#start-a-new-group-or-project-migration +func (b *BulkImportsService) StartMigration(startMigrationOptions *BulkImportStartMigrationOptions, options ...RequestOptionFunc) (*BulkImportStartMigrationResponse, *Response, error) { + request, err := b.client.NewRequest(http.MethodPost, "bulk_imports", startMigrationOptions, options) + if err != nil { + return nil, nil, err + } + + startMigrationResponse := new(BulkImportStartMigrationResponse) + response, err := b.client.Do(request, startMigrationResponse) + if err != nil { + return nil, response, err + } + + return startMigrationResponse, response, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/ci_yml_templates.go diff --git a/vendor/github.com/xanzy/go-gitlab/client_options.go b/vendor/gitlab.com/gitlab-org/api/client-go/client_options.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/client_options.go rename to vendor/gitlab.com/gitlab-org/api/client-go/client_options.go diff --git a/vendor/github.com/xanzy/go-gitlab/cluster_agents.go b/vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/cluster_agents.go rename to vendor/gitlab.com/gitlab-org/api/client-go/cluster_agents.go diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/gitlab.com/gitlab-org/api/client-go/commits.go similarity index 93% rename from vendor/github.com/xanzy/go-gitlab/commits.go rename to vendor/gitlab.com/gitlab-org/api/client-go/commits.go index b87dd9f1..c1a9ef3d 100644 --- a/vendor/github.com/xanzy/go-gitlab/commits.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/commits.go @@ -35,24 +35,25 @@ type CommitsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/commits.html type Commit struct { - ID string `json:"id"` - ShortID string `json:"short_id"` - Title string `json:"title"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthoredDate *time.Time `json:"authored_date"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - CommittedDate *time.Time `json:"committed_date"` - CreatedAt *time.Time `json:"created_at"` - Message string `json:"message"` - ParentIDs []string `json:"parent_ids"` - Stats *CommitStats `json:"stats"` - Status *BuildStateValue `json:"status"` - LastPipeline *PipelineInfo `json:"last_pipeline"` - ProjectID int `json:"project_id"` - Trailers map[string]string `json:"trailers"` - WebURL string `json:"web_url"` + ID string `json:"id"` + ShortID string `json:"short_id"` + Title string `json:"title"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthoredDate *time.Time `json:"authored_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` + CommittedDate *time.Time `json:"committed_date"` + CreatedAt *time.Time `json:"created_at"` + Message string `json:"message"` + ParentIDs []string `json:"parent_ids"` + Stats *CommitStats `json:"stats"` + Status *BuildStateValue `json:"status"` + LastPipeline *PipelineInfo `json:"last_pipeline"` + ProjectID int `json:"project_id"` + Trailers map[string]string `json:"trailers"` + ExtendedTrailers map[string]string 
`json:"extended_trailers"` + WebURL string `json:"web_url"` } // CommitStats represents the number of added and deleted files in a commit. @@ -151,11 +152,19 @@ func (s *CommitsService) GetCommitRefs(pid interface{}, sha string, opt *GetComm return cs, resp, nil } +// GetCommitOptions represents the available GetCommit() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit +type GetCommitOptions struct { + Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` +} + // GetCommit gets a specific commit identified by the commit hash or name of a // branch or tag. // // GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit -func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...RequestOptionFunc) (*Commit, *Response, error) { +func (s *CommitsService) GetCommit(pid interface{}, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err @@ -165,7 +174,7 @@ func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...Reque } u := fmt.Sprintf("projects/%s/repository/commits/%s", PathEscape(project), url.PathEscape(sha)) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/container_registry.go b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go similarity index 99% rename from vendor/github.com/xanzy/go-gitlab/container_registry.go rename to vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go index eaf0e346..bec477df 100644 --- a/vendor/github.com/xanzy/go-gitlab/container_registry.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/container_registry.go @@ -41,6 +41,7 @@ type RegistryRepository struct { Location string `json:"location"` CreatedAt *time.Time `json:"created_at"` CleanupPolicyStartedAt *time.Time `json:"cleanup_policy_started_at"` + Status *ContainerRegistryStatus `json:"status"` TagsCount int `json:"tags_count"` Tags []*RegistryRepositoryTag `json:"tags"` } diff --git a/vendor/github.com/xanzy/go-gitlab/custom_attributes.go b/vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/custom_attributes.go rename to vendor/gitlab.com/gitlab-org/api/client-go/custom_attributes.go diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go new file mode 100644 index 00000000..c1e786e8 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dependency_list_export.go @@ -0,0 +1,122 @@ +package gitlab + +import ( + "bytes" + "fmt" + "io" + "net/http" +) + +type DependencyListExportService struct { + client *Client +} + +// CreateDependencyListExportOptions represents the available CreateDependencyListExport() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/dependency_list_export.html#create-a-pipeline-level-dependency-list-export +type CreateDependencyListExportOptions struct { + ExportType *string `url:"export_type" json:"export_type"` +} + +// DependencyListExport represents a request for a GitLab project's dependency list. 
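+//
+// A typical polling workflow, sketched for illustration (not part of the
+// upstream file; git is assumed to be a configured *gitlab.Client, pipelineID
+// is hypothetical, and the poll interval is arbitrary):
+//
+//	export, _, err := git.DependencyListExport.CreateDependencyListExport(pipelineID, nil)
+//	if err != nil { /* handle error */ }
+//	for !export.HasFinished {
+//		time.Sleep(5 * time.Second)
+//		export, _, err = git.DependencyListExport.GetDependencyListExport(export.ID)
+//		if err != nil { /* handle error */ }
+//	}
+//	reader, _, err := git.DependencyListExport.DownloadDependencyListExport(export.ID)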
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/dependency_list_export.html#create-a-pipeline-level-dependency-list-export +type DependencyListExport struct { + ID int `json:"id"` + HasFinished bool `json:"has_finished"` + Self string `json:"self"` + Download string `json:"download"` +} + +const defaultExportType = "sbom" + +// CreateDependencyListExport creates a new CycloneDX JSON export for all the project dependencies +// detected in a pipeline. +// +// If an authenticated user does not have permission to read_dependency, this request returns a 403 +// Forbidden status code. +// +// SBOM exports can be only accessed by the export’s author. +// +// GitLab docs: +// https://docs.gitlab.com/ee/api/dependency_list_export.html#create-a-pipeline-level-dependency-list-export +func (s *DependencyListExportService) CreateDependencyListExport(pipelineID int, opt *CreateDependencyListExportOptions, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { + // POST /pipelines/:id/dependency_list_exports + createExportPath := fmt.Sprintf("pipelines/%d/dependency_list_exports", pipelineID) + + if opt == nil { + opt = &CreateDependencyListExportOptions{} + } + if opt.ExportType == nil { + opt.ExportType = Ptr(defaultExportType) + } + + req, err := s.client.NewRequest(http.MethodPost, createExportPath, opt, options) + if err != nil { + return nil, nil, err + } + + export := new(DependencyListExport) + resp, err := s.client.Do(req, &export) + if err != nil { + return nil, resp, err + } + + return export, resp, nil +} + +// GetDependencyListExport gets metadata about a single dependency list export. +// +// GitLab docs: +// https://docs.gitlab.com/ee/api/dependency_list_export.html#get-single-dependency-list-export +func (s *DependencyListExportService) GetDependencyListExport(id int, options ...RequestOptionFunc) (*DependencyListExport, *Response, error) { + // GET /dependency_list_exports/:id + getExportPath := fmt.Sprintf("dependency_list_exports/%d", id) + + req, err := s.client.NewRequest(http.MethodGet, getExportPath, nil, options) + if err != nil { + return nil, nil, err + } + + export := new(DependencyListExport) + resp, err := s.client.Do(req, &export) + if err != nil { + return nil, resp, err + } + + return export, resp, nil +} + +// DownloadDependencyListExport downloads a single dependency list export. +// +// The github.com/CycloneDX/cyclonedx-go package can be used to parse the data from the returned io.Reader. 
+// +// sbom := new(cdx.BOM) +// decoder := cdx.NewBOMDecoder(reader, cdx.BOMFileFormatJSON) +// +// if err = decoder.Decode(sbom); err != nil { +// panic(err) +// } +// +// GitLab docs: +// https://docs.gitlab.com/ee/api/dependency_list_export.html#download-dependency-list-export +func (s *DependencyListExportService) DownloadDependencyListExport(id int, options ...RequestOptionFunc) (io.Reader, *Response, error) { + // GET /dependency_list_exports/:id/download + downloadExportPath := fmt.Sprintf("dependency_list_exports/%d/download", id) + + req, err := s.client.NewRequest(http.MethodGet, downloadExportPath, nil, options) + if err != nil { + return nil, nil, err + } + + var sbomBuffer bytes.Buffer + resp, err := s.client.Do(req, &sbomBuffer) + if err != nil { + return nil, resp, err + } + + return &sbomBuffer, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go similarity index 91% rename from vendor/github.com/xanzy/go-gitlab/deploy_keys.go rename to vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go index e343bef9..341689b2 100644 --- a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_keys.go @@ -62,11 +62,14 @@ func (k DeployKeyProject) String() string { // ProjectDeployKey represents a GitLab project deploy key. type ProjectDeployKey struct { - ID int `json:"id"` - Title string `json:"title"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` - CanPush bool `json:"can_push"` + ID int `json:"id"` + Title string `json:"title"` + Key string `json:"key"` + Fingerprint string `json:"fingerprint"` + FingerprintSHA256 string `json:"fingerprint_sha256"` + CreatedAt *time.Time `json:"created_at"` + CanPush bool `json:"can_push"` + ExpiresAt *time.Time `json:"expires_at"` } func (k ProjectDeployKey) String() string { @@ -162,11 +165,12 @@ func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options // AddDeployKeyOptions represents the available ADDDeployKey() options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key +// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key-for-a-project type AddDeployKeyOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Key *string `url:"key,omitempty" json:"key,omitempty"` - CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` + Key *string `url:"key,omitempty" json:"key,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` + ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` } // AddDeployKey creates a new deploy key for a project. If deploy key already @@ -174,7 +178,7 @@ type AddDeployKeyOptions struct { // original one was is accessible by same user. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key +// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key-for-a-project func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { project, err := parseID(pid) if err != nil { diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/deploy_tokens.go rename to vendor/gitlab.com/gitlab-org/api/client-go/deploy_tokens.go diff --git a/vendor/github.com/xanzy/go-gitlab/deployments.go b/vendor/gitlab.com/gitlab-org/api/client-go/deployments.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/deployments.go rename to vendor/gitlab.com/gitlab-org/api/client-go/deployments.go diff --git a/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go rename to vendor/gitlab.com/gitlab-org/api/client-go/deployments_merge_requests.go diff --git a/vendor/github.com/xanzy/go-gitlab/discussions.go b/vendor/gitlab.com/gitlab-org/api/client-go/discussions.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/discussions.go rename to vendor/gitlab.com/gitlab-org/api/client-go/discussions.go diff --git a/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/dockerfile_templates.go diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go b/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go new file mode 100644 index 00000000..a2ad418e --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/dora_metrics.go @@ -0,0 +1,110 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// DORAMetricsService handles communication with the DORA metrics related methods +// of the GitLab API. +// +// Gitlab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetricsService struct { + client *Client +} + +// DORAMetric represents a single DORA metric data point. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetric struct { + Date string `json:"date"` + Value float64 `json:"value"` +} + +// String returns a string representation of a DORAMetric data point. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +func (m DORAMetric) String() string { + return Stringify(m) +} + +// GetDORAMetricsOptions represents the available options for getting +// DORA metrics. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type GetDORAMetricsOptions struct { + Metric *DORAMetricType `url:"metric,omitempty" json:"metric,omitempty"` + EndDate *ISOTime `url:"end_date,omitempty" json:"end_date,omitempty"` + EnvironmentTiers *[]string `url:"environment_tiers,comma,omitempty" json:"environment_tiers,omitempty"` + Interval *DORAMetricInterval `url:"interval,omitempty" json:"interval,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + + // Deprecated: Use EnvironmentTiers instead. + EnvironmentTier *string `url:"environment_tier,omitempty" json:"environment_tier,omitempty"` +} + +// GetProjectDORAMetrics gets the DORA metrics for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/dora/metrics.html#get-project-level-dora-metrics +func (s *DORAMetricsService) GetProjectDORAMetrics(pid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/dora/metrics", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var metrics []DORAMetric + resp, err := s.client.Do(req, &metrics) + if err != nil { + return nil, resp, err + } + + return metrics, resp, err +} + +// GetGroupDORAMetrics gets the DORA metrics for a group.
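+//
+// A minimal usage sketch (illustrative only, not part of the upstream file;
+// git is assumed to be a configured *gitlab.Client, and
+// DORAMetricDeploymentFrequency is assumed to be one of the DORAMetricType
+// constants defined elsewhere in this package):
+//
+//	metrics, _, err := git.DORAMetrics.GetGroupDORAMetrics("my-group", gitlab.GetDORAMetricsOptions{
+//		Metric: gitlab.Ptr(gitlab.DORAMetricDeploymentFrequency),
+//	})
+//	if err != nil { /* handle error */ }
+//	for _, m := range metrics {
+//		fmt.Printf("%s: %f\n", m.Date, m.Value)
+//	}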
+// +// GitLab API Docs: +// https://docs.gitlab.com/ee/api/dora/metrics.html#get-group-level-dora-metrics +func (s *DORAMetricsService) GetGroupDORAMetrics(gid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/dora/metrics", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var metrics []DORAMetric + resp, err := s.client.Do(req, &metrics) + if err != nil { + return nil, resp, err + } + + return metrics, resp, err +} diff --git a/vendor/github.com/xanzy/go-gitlab/draft_notes.go b/vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/draft_notes.go rename to vendor/gitlab.com/gitlab-org/api/client-go/draft_notes.go diff --git a/vendor/github.com/xanzy/go-gitlab/environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/environments.go similarity index 74% rename from vendor/github.com/xanzy/go-gitlab/environments.go rename to vendor/gitlab.com/gitlab-org/api/client-go/environments.go index b6d902f8..091fbb13 100644 --- a/vendor/github.com/xanzy/go-gitlab/environments.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/environments.go @@ -34,16 +34,22 @@ type EnvironmentsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/environments.html type Environment struct { - ID int `json:"id"` - Name string `json:"name"` - Slug string `json:"slug"` - State string `json:"state"` - Tier string `json:"tier"` - ExternalURL string `json:"external_url"` - Project *Project `json:"project"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - LastDeployment *Deployment `json:"last_deployment"` + ID int `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Description string `json:"description"` + State string `json:"state"` + Tier string `json:"tier"` + ExternalURL string `json:"external_url"` + Project *Project `json:"project"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + LastDeployment *Deployment `json:"last_deployment"` + ClusterAgent *Agent `json:"cluster_agent"` + KubernetesNamespace string `json:"kubernetes_namespace"` + FluxResourcePath string `json:"flux_resource_path"` + AutoStopAt *time.Time `json:"auto_stop_at"` + AutoStopSetting string `json:"auto_stop_setting"` } func (env Environment) String() string { @@ -117,9 +123,14 @@ func (s *EnvironmentsService) GetEnvironment(pid interface{}, environment int, o // GitLab API docs: // https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment type CreateEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` + FluxResourcePath *string 
`url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` + AutoStopSetting *string `url:"auto_stop_setting,omitempty" json:"auto_stop_setting,omitempty"` } // CreateEnvironment adds an environment to a project. This is an idempotent @@ -155,9 +166,14 @@ func (s *EnvironmentsService) CreateEnvironment(pid interface{}, opt *CreateEnvi // GitLab API docs: // https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment type EditEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` + FluxResourcePath *string `url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` + AutoStopSetting *string `url:"auto_stop_setting,omitempty" json:"auto_stop_setting,omitempty"` } // EditEnvironment updates a project team environment to a specified access level.. diff --git a/vendor/github.com/xanzy/go-gitlab/epic_issues.go b/vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/epic_issues.go rename to vendor/gitlab.com/gitlab-org/api/client-go/epic_issues.go diff --git a/vendor/github.com/xanzy/go-gitlab/epics.go b/vendor/gitlab.com/gitlab-org/api/client-go/epics.go similarity index 92% rename from vendor/github.com/xanzy/go-gitlab/epics.go rename to vendor/gitlab.com/gitlab-org/api/client-go/epics.go index cd553ead..684ffb33 100644 --- a/vendor/github.com/xanzy/go-gitlab/epics.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/epics.go @@ -175,12 +175,16 @@ func (s *EpicsService) GetEpicLinks(gid interface{}, epic int, options ...Reques // GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic type CreateEpicOptions struct { Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` } // CreateEpic creates a new group epic. 
@@ -211,15 +215,20 @@ func (s *EpicsService) CreateEpic(gid interface{}, opt *CreateEpicOptions, optio // // GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic type UpdateEpicOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` + AddLabels *LabelOptions `url:"add_labels,omitempty" json:"add_labels,omitempty"` Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` + DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + RemoveLabels *LabelOptions `url:"remove_labels,omitempty" json:"remove_labels,omitempty"` StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` - DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` - DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` } // UpdateEpic updates an existing group epic. This function is also used diff --git a/vendor/github.com/xanzy/go-gitlab/error_tracking.go b/vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/error_tracking.go rename to vendor/gitlab.com/gitlab-org/api/client-go/error_tracking.go diff --git a/vendor/github.com/xanzy/go-gitlab/event_parsing.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go similarity index 78% rename from vendor/github.com/xanzy/go-gitlab/event_parsing.go rename to vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go index 3943abad..eb81fa05 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_parsing.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/event_parsing.go @@ -27,24 +27,25 @@ type EventType string // List of available event types. 
const ( - EventConfidentialIssue EventType = "Confidential Issue Hook" - EventConfidentialNote EventType = "Confidential Note Hook" - EventTypeBuild EventType = "Build Hook" - EventTypeDeployment EventType = "Deployment Hook" - EventTypeFeatureFlag EventType = "Feature Flag Hook" - EventTypeIssue EventType = "Issue Hook" - EventTypeJob EventType = "Job Hook" - EventTypeMember EventType = "Member Hook" - EventTypeMergeRequest EventType = "Merge Request Hook" - EventTypeNote EventType = "Note Hook" - EventTypePipeline EventType = "Pipeline Hook" - EventTypePush EventType = "Push Hook" - EventTypeRelease EventType = "Release Hook" - EventTypeServiceHook EventType = "Service Hook" - EventTypeSubGroup EventType = "Subgroup Hook" - EventTypeSystemHook EventType = "System Hook" - EventTypeTagPush EventType = "Tag Push Hook" - EventTypeWikiPage EventType = "Wiki Page Hook" + EventConfidentialIssue EventType = "Confidential Issue Hook" + EventConfidentialNote EventType = "Confidential Note Hook" + EventTypeBuild EventType = "Build Hook" + EventTypeDeployment EventType = "Deployment Hook" + EventTypeFeatureFlag EventType = "Feature Flag Hook" + EventTypeIssue EventType = "Issue Hook" + EventTypeJob EventType = "Job Hook" + EventTypeMember EventType = "Member Hook" + EventTypeMergeRequest EventType = "Merge Request Hook" + EventTypeNote EventType = "Note Hook" + EventTypePipeline EventType = "Pipeline Hook" + EventTypePush EventType = "Push Hook" + EventTypeRelease EventType = "Release Hook" + EventTypeResourceAccessToken EventType = "Resource Access Token Hook" + EventTypeServiceHook EventType = "Service Hook" + EventTypeSubGroup EventType = "Subgroup Hook" + EventTypeSystemHook EventType = "System Hook" + EventTypeTagPush EventType = "Tag Push Hook" + EventTypeWikiPage EventType = "Wiki Page Hook" ) const ( @@ -71,6 +72,13 @@ type serviceEvent struct { ObjectKind string `json:"object_kind"` } +const eventTokenHeader = "X-Gitlab-Token" + +// HookEventToken returns the token for the given request. +func HookEventToken(r *http.Request) string { + return r.Header.Get(eventTokenHeader) +} + const eventTypeHeader = "X-Gitlab-Event" // HookEventType returns the event type for the given request. @@ -83,7 +91,7 @@ func HookEventType(r *http.Request) EventType { // Example usage: // // func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) +// payload, err := io.ReadAll(r.Body) // if err != nil { ... } // event, err := gitlab.ParseHook(gitlab.HookEventType(r), payload) // if err != nil { ... } @@ -111,7 +119,7 @@ func ParseHook(eventType EventType, payload []byte) (event interface{}, err erro // Example usage: // // func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) +// payload, err := io.ReadAll(r.Body) // if err != nil { ... } // event, err := gitlab.ParseSystemhook(payload) // if err != nil { ... } @@ -195,7 +203,7 @@ func WebhookEventType(r *http.Request) EventType { // Example usage: // // func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) +// payload, err := io.ReadAll(r.Body) // if err != nil { ... } // event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload) // if err != nil { ... 
} @@ -252,6 +260,24 @@ func ParseWebhook(eventType EventType, payload []byte) (event interface{}, err e event = &PushEvent{} case EventTypeRelease: event = &ReleaseEvent{} + case EventTypeResourceAccessToken: + data := map[string]interface{}{} + err := json.Unmarshal(payload, &data) + if err != nil { + return nil, err + } + + _, groupEvent := data["group"] + _, projectEvent := data["project"] + + switch { + case groupEvent: + event = &GroupResourceAccessTokenEvent{} + case projectEvent: + event = &ProjectResourceAccessTokenEvent{} + default: + return nil, fmt.Errorf("unexpected resource access token payload") + } case EventTypeServiceHook: service := &serviceEvent{} err := json.Unmarshal(payload, service) diff --git a/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go rename to vendor/gitlab.com/gitlab-org/api/client-go/event_systemhook_types.go diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go similarity index 87% rename from vendor/github.com/xanzy/go-gitlab/event_webhook_types.go rename to vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go index 01b09830..deded265 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/event_webhook_types.go @@ -102,21 +102,22 @@ type CommitCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Commit *struct { ID string `json:"id"` @@ -131,7 +132,7 @@ type CommitCommentEvent struct { } `json:"commit"` } -// DeploymentEvent represents a deployment event +// DeploymentEvent represents a deployment event. // // GitLab API docs: // https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events @@ -171,7 +172,7 @@ type DeploymentEvent struct { CommitTitle string `json:"commit_title"` } -// FeatureFlagEvent represents a feature flag event +// FeatureFlagEvent represents a feature flag event. 
// // GitLab API docs: // https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#feature-flag-events @@ -205,6 +206,28 @@ type FeatureFlagEvent struct { } `json:"object_attributes"` } +// GroupResourceAccessTokenEvent represents a resource access token event for a +// group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type GroupResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Group struct { + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + } `json:"group"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + // IssueCommentEvent represents a comment on an issue event. // // GitLab API docs: @@ -232,22 +255,23 @@ type IssueCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - DiscussionID string `json:"discussion_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff []*Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + DiscussionID string `json:"discussion_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff []*Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Issue struct { ID int `json:"id"` @@ -491,30 +515,31 @@ type MergeCommentEvent struct { Visibility VisibilityValue `json:"visibility"` } `json:"project"` ObjectAttributes struct { - Attachment string `json:"attachment"` - AuthorID int `json:"author_id"` - ChangePosition *NotePosition `json:"change_position"` - CommitID string `json:"commit_id"` - CreatedAt string `json:"created_at"` - DiscussionID string `json:"discussion_id"` - ID int `json:"id"` - LineCode string `json:"line_code"` - Note string `json:"note"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - OriginalPosition *NotePosition `json:"original_position"` - Position *NotePosition `json:"position"` - ProjectID int `json:"project_id"` - ResolvedAt string `json:"resolved_at"` - ResolvedByID int `json:"resolved_by_id"` - ResolvedByPush bool `json:"resolved_by_push"` - StDiff *Diff `json:"st_diff"` - System bool `json:"system"` - Type string `json:"type"` - UpdatedAt string `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` - Description string `json:"description"` - URL string `json:"url"` + Attachment string `json:"attachment"` + AuthorID int `json:"author_id"` + 
ChangePosition *NotePosition `json:"change_position"` + CommitID string `json:"commit_id"` + CreatedAt string `json:"created_at"` + DiscussionID string `json:"discussion_id"` + ID int `json:"id"` + LineCode string `json:"line_code"` + Note string `json:"note"` + NoteableID int `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + OriginalPosition *NotePosition `json:"original_position"` + Position *NotePosition `json:"position"` + ProjectID int `json:"project_id"` + ResolvedAt string `json:"resolved_at"` + ResolvedByID int `json:"resolved_by_id"` + ResolvedByPush bool `json:"resolved_by_push"` + StDiff *Diff `json:"st_diff"` + System bool `json:"system"` + Type string `json:"type"` + UpdatedAt string `json:"updated_at"` + UpdatedByID int `json:"updated_by_id"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Repository *Repository `json:"repository"` MergeRequest struct { @@ -696,6 +721,10 @@ type MergeEvent struct { Previous int `json:"previous"` Current int `json:"current"` } `json:"last_edited_by_id"` + MergeStatus struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"merge_status"` MilestoneID struct { Previous int `json:"previous"` Current int `json:"current"` @@ -737,7 +766,8 @@ Reviewers []*EventUser `json:"reviewers"` } -// EventUser represents a user record in an event and is used as an even initiator or a merge assignee. +// EventUser represents a user record in an event and is used as an event +// initiator or a merge assignee. type EventUser struct { ID int `json:"id"` Name string `json:"name"` @@ -753,7 +783,8 @@ type MergeParams struct { // UnmarshalJSON decodes the merge parameters // -// This allows support of ForceRemoveSourceBranch for both type bool (>11.9) and string (<11.9) +// This allows support of ForceRemoveSourceBranch for both type +// bool (>11.9) and string (<11.9) func (p *MergeParams) UnmarshalJSON(b []byte) error { type Alias MergeParams raw := struct { @@ -855,7 +886,7 @@ type PipelineEvent struct { Email string `json:"email"` } `json:"author"` } `json:"commit"` - SourcePipline struct { + SourcePipeline struct { Project struct { ID int `json:"id"` WebURL string `json:"web_url"` @@ -899,6 +930,41 @@ } `json:"builds"` } +// ProjectResourceAccessTokenEvent represents a resource access token event for +// a project.
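+//
+// A hypothetical dispatch sketch (not part of the upstream file; r and
+// payload are assumed to come from an HTTP handler) showing how ParseWebhook
+// returns the two resource access token event types:
+//
+//	event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload)
+//	if err != nil { /* handle error */ }
+//	switch ev := event.(type) {
+//	case *gitlab.ProjectResourceAccessTokenEvent:
+//		log.Printf("project access token %q expires at %v", ev.ObjectAttributes.Name, ev.ObjectAttributes.ExpiresAt)
+//	case *gitlab.GroupResourceAccessTokenEvent:
+//		log.Printf("group access token %q expires at %v", ev.ObjectAttributes.Name, ev.ObjectAttributes.ExpiresAt)
+//	}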
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type ProjectResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + // PushEvent represents a push event. // // GitLab API docs: @@ -1037,21 +1103,22 @@ type SnippetCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Snippet *struct { ID int `json:"id"` diff --git a/vendor/github.com/xanzy/go-gitlab/events.go b/vendor/gitlab.com/gitlab-org/api/client-go/events.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/events.go rename to vendor/gitlab.com/gitlab-org/api/client-go/events.go diff --git a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go b/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go similarity index 90% rename from vendor/github.com/xanzy/go-gitlab/external_status_checks.go rename to vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go index dc8e95b8..c6a3f7b2 100644 --- a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/external_status_checks.go @@ -197,3 +197,22 @@ func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid interface{}, return s.client.Do(req, nil) } + +// RetryFailedStatusCheckForAMergeRequest retries a failed external status check for a merge request.
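+//
+// A minimal usage sketch (illustrative only; the project path, merge request
+// IID, and check ID are hypothetical, and git is an already configured
+// *gitlab.Client):
+//
+//	resp, err := git.ExternalStatusChecks.RetryFailedStatusCheckForAMergeRequest("my-group/my-project", 42, 7)
+//	if err != nil { /* handle error */ }
+//	fmt.Println(resp.StatusCode)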
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#retry-failed-status-check-for-a-merge-request +func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid interface{}, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/feature_flags.go b/vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/feature_flags.go rename to vendor/gitlab.com/gitlab-org/api/client-go/feature_flags.go diff --git a/vendor/github.com/xanzy/go-gitlab/freeze_periods.go b/vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/freeze_periods.go rename to vendor/gitlab.com/gitlab-org/api/client-go/freeze_periods.go diff --git a/vendor/github.com/xanzy/go-gitlab/generic_packages.go b/vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/generic_packages.go rename to vendor/gitlab.com/gitlab-org/api/client-go/generic_packages.go diff --git a/vendor/github.com/xanzy/go-gitlab/geo_nodes.go b/vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/geo_nodes.go rename to vendor/gitlab.com/gitlab-org/api/client-go/geo_nodes.go diff --git a/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/gitignore_templates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/gitignore_templates.go diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go similarity index 95% rename from vendor/github.com/xanzy/go-gitlab/gitlab.go rename to vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go index e4920501..24a6fd88 100644 --- a/vendor/github.com/xanzy/go-gitlab/gitlab.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/gitlab.go @@ -21,8 +21,10 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" + "math" "math/rand" "mime/multipart" "net/http" @@ -65,6 +67,8 @@ const ( PrivateToken ) +var ErrNotFound = errors.New("404 Not Found") + // A Client manages communication with the GitLab API. type Client struct { // HTTP client used to communicate with the API. 
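The exported ErrNotFound sentinel added above is returned directly by CheckResponse for 404 responses (see the CheckResponse hunk further down in this file), so callers can branch on missing resources with errors.Is instead of unpacking an *ErrorResponse. A minimal usage sketch, not part of this diff, with a placeholder token and project path:

	// Sketch: detecting a missing project via the new ErrNotFound sentinel.
	package main

	import (
		"errors"
		"fmt"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		// Placeholder token; supply a real one in practice.
		client, err := gitlab.NewClient("glpat-placeholder")
		if err != nil {
			panic(err)
		}
		// Placeholder project path used only for illustration.
		_, _, err = client.Projects.GetProject("mygroup/missing-project", nil)
		switch {
		case errors.Is(err, gitlab.ErrNotFound):
			fmt.Println("project not found (HTTP 404)")
		case err != nil:
			fmt.Println("other API error:", err)
		}
	}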
@@ -113,17 +117,20 @@ type Client struct { Boards *IssueBoardsService Branches *BranchesService BroadcastMessage *BroadcastMessagesService + BulkImports *BulkImportsService CIYMLTemplate *CIYMLTemplatesService ClusterAgents *ClusterAgentsService Commits *CommitsService ContainerRegistry *ContainerRegistryService CustomAttribute *CustomAttributesService + DependencyListExport *DependencyListExportService DeployKeys *DeployKeysService DeployTokens *DeployTokensService DeploymentMergeRequests *DeploymentMergeRequestsService Deployments *DeploymentsService Discussions *DiscussionsService DockerfileTemplate *DockerfileTemplatesService + DORAMetrics *DORAMetricsService DraftNotes *DraftNotesService Environments *EnvironmentsService EpicIssues *EpicIssuesService @@ -148,10 +155,12 @@ type Client struct { GroupMilestones *GroupMilestonesService GroupProtectedEnvironments *GroupProtectedEnvironmentsService GroupRepositoryStorageMove *GroupRepositoryStorageMoveService + GroupSecuritySettings *GroupSecuritySettingsService GroupSSHCertificates *GroupSSHCertificatesService GroupVariables *GroupVariablesService GroupWikis *GroupWikisService Groups *GroupsService + Import *ImportService InstanceCluster *InstanceClustersService InstanceVariables *InstanceVariablesService Invites *InvitesService @@ -189,6 +198,7 @@ type Client struct { ProjectFeatureFlags *ProjectFeatureFlagService ProjectImportExport *ProjectImportExportService ProjectIterations *ProjectIterationsService + ProjectMarkdownUploads *ProjectMarkdownUploadsService ProjectMembers *ProjectMembersService ProjectMirrors *ProjectMirrorService ProjectRepositoryStorageMove *ProjectRepositoryStorageMoveService @@ -205,6 +215,7 @@ type Client struct { Repositories *RepositoriesService RepositoryFiles *RepositoryFilesService RepositorySubmodules *RepositorySubmodulesService + ResourceGroup *ResourceGroupService ResourceIterationEvents *ResourceIterationEventsService ResourceLabelEvents *ResourceLabelEventsService ResourceMilestoneEvents *ResourceMilestoneEventsService @@ -230,15 +241,16 @@ type Client struct { // ListOptions specifies the optional parameters to various List methods that // support pagination. type ListOptions struct { - // For offset-based paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty" json:"page,omitempty"` + // For keyset-based paginated result sets, the value must be `"keyset"` + Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` // For offset-based and keyset-based paginated result sets, the number of results to include per page. PerPage int `url:"per_page,omitempty" json:"per_page,omitempty"` - + // For offset-based paginated result sets, page of results to retrieve. + Page int `url:"page,omitempty" json:"page,omitempty"` + // For keyset-based paginated result sets, tree record ID at which to fetch the next page. 
+ PageToken string `url:"page_token,omitempty" json:"page_token,omitempty"` // For keyset-based paginated result sets, name of the column by which to order OrderBy string `url:"order_by,omitempty" json:"order_by,omitempty"` - // For keyset-based paginated result sets, the value must be `"keyset"` - Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` // For keyset-based paginated result sets, sort order (`"asc"`` or `"desc"`) Sort string `url:"sort,omitempty" json:"sort,omitempty"` } @@ -347,17 +359,20 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.Boards = &IssueBoardsService{client: c} c.Branches = &BranchesService{client: c} c.BroadcastMessage = &BroadcastMessagesService{client: c} + c.BulkImports = &BulkImportsService{client: c} c.CIYMLTemplate = &CIYMLTemplatesService{client: c} c.ClusterAgents = &ClusterAgentsService{client: c} c.Commits = &CommitsService{client: c} c.ContainerRegistry = &ContainerRegistryService{client: c} c.CustomAttribute = &CustomAttributesService{client: c} + c.DependencyListExport = &DependencyListExportService{client: c} c.DeployKeys = &DeployKeysService{client: c} c.DeployTokens = &DeployTokensService{client: c} c.DeploymentMergeRequests = &DeploymentMergeRequestsService{client: c} c.Deployments = &DeploymentsService{client: c} c.Discussions = &DiscussionsService{client: c} c.DockerfileTemplate = &DockerfileTemplatesService{client: c} + c.DORAMetrics = &DORAMetricsService{client: c} c.DraftNotes = &DraftNotesService{client: c} c.Environments = &EnvironmentsService{client: c} c.EpicIssues = &EpicIssuesService{client: c} @@ -382,10 +397,12 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.GroupMilestones = &GroupMilestonesService{client: c} c.GroupProtectedEnvironments = &GroupProtectedEnvironmentsService{client: c} c.GroupRepositoryStorageMove = &GroupRepositoryStorageMoveService{client: c} + c.GroupSecuritySettings = &GroupSecuritySettingsService{client: c} c.GroupSSHCertificates = &GroupSSHCertificatesService{client: c} c.GroupVariables = &GroupVariablesService{client: c} c.GroupWikis = &GroupWikisService{client: c} c.Groups = &GroupsService{client: c} + c.Import = &ImportService{client: c} c.InstanceCluster = &InstanceClustersService{client: c} c.InstanceVariables = &InstanceVariablesService{client: c} c.Invites = &InvitesService{client: c} @@ -423,6 +440,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.ProjectFeatureFlags = &ProjectFeatureFlagService{client: c} c.ProjectImportExport = &ProjectImportExportService{client: c} c.ProjectIterations = &ProjectIterationsService{client: c} + c.ProjectMarkdownUploads = &ProjectMarkdownUploadsService{client: c} c.ProjectMembers = &ProjectMembersService{client: c} c.ProjectMirrors = &ProjectMirrorService{client: c} c.ProjectRepositoryStorageMove = &ProjectRepositoryStorageMoveService{client: c} @@ -439,6 +457,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.Repositories = &RepositoriesService{client: c} c.RepositoryFiles = &RepositoryFilesService{client: c} c.RepositorySubmodules = &RepositorySubmodulesService{client: c} + c.ResourceGroup = &ResourceGroupService{client: c} c.ResourceIterationEvents = &ResourceIterationEventsService{client: c} c.ResourceLabelEvents = &ResourceLabelEventsService{client: c} c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} @@ -515,6 +534,11 @@ func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Respons min = wait } } + } else { + // In case the 
RateLimit-Reset header is not set, back off an additional + // 100% exponentially. With the default milliseconds being set to 100 for + // `min`, this makes the 5th retry wait 3.2 seconds (3,200 ms) by default. + min = time.Duration(float64(min) * math.Pow(2, float64(attemptNum))) } } @@ -953,8 +977,13 @@ type ErrorResponse struct { func (e *ErrorResponse) Error() string { path, _ := url.QueryUnescape(e.Response.Request.URL.Path) - u := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path) - return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, u, e.Response.StatusCode, e.Message) + url := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path) + + if e.Message == "" { + return fmt.Sprintf("%s %s: %d", e.Response.Request.Method, url, e.Response.StatusCode) + } else { + return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message) + } } // CheckResponse checks the API response for errors, and returns them if present. @@ -962,11 +991,14 @@ func CheckResponse(r *http.Response) error { switch r.StatusCode { case 200, 201, 202, 204, 304: return nil + case 404: + return ErrNotFound } errorResponse := &ErrorResponse{Response: r} + data, err := io.ReadAll(r.Body) - if err == nil && data != nil { + if err == nil && strings.TrimSpace(string(data)) != "" { errorResponse.Body = data var raw interface{} diff --git a/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_access_tokens.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_access_tokens.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_badges.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_badges.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_badges.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_boards.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_boards.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_clusters.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_clusters.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_epic_boards.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_epic_boards.go diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go new file mode 100644 index 00000000..320f0384 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_hooks.go @@ -0,0 +1,287 @@ +// +// Copyright 2021, Eric Stevens +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// GroupHook represents a GitLab group hook. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +type GroupHook struct { + ID int `json:"id"` + URL string `json:"url"` + GroupID int `json:"group_id"` + PushEvents bool `json:"push_events"` + PushEventsBranchFilter string `json:"push_events_branch_filter"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + JobEvents bool `json:"job_events"` + PipelineEvents bool `json:"pipeline_events"` + WikiPageEvents bool `json:"wiki_page_events"` + DeploymentEvents bool `json:"deployment_events"` + ReleasesEvents bool `json:"releases_events"` + SubGroupEvents bool `json:"subgroup_events"` + MemberEvents bool `json:"member_events"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + AlertStatus string `json:"alert_status"` + CreatedAt *time.Time `json:"created_at"` + CustomWebhookTemplate string `json:"custom_webhook_template"` + ResourceAccessTokenEvents bool `json:"resource_access_token_events"` + CustomHeaders []*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` +} + +// ListGroupHooksOptions represents the available ListGroupHooks() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +type ListGroupHooksOptions ListOptions + +// ListGroupHooks gets a list of group hooks. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +func (s *GroupsService) ListGroupHooks(gid interface{}, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + var gh []*GroupHook + resp, err := s.client.Do(req, &gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// GetGroupHook gets a specific hook for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#get-group-hook +func (s *GroupsService) GetGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gh := new(GroupHook) + resp, err := s.client.Do(req, gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// AddGroupHookOptions represents the available AddGroupHook() options. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook +type AddGroupHookOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` + MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` +} + +// AddGroupHook creates a new group-scoped webhook. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook +func (s *GroupsService) AddGroupHook(gid interface{}, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gh := new(GroupHook) + resp, err := s.client.Do(req, gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// EditGroupHookOptions represents the available EditGroupHook() options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook +type EditGroupHookOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` + MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` +} + +// EditGroupHook edits a hook for a specified group. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook +func (s *GroupsService) EditGroupHook(pid interface{}, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + gh := new(GroupHook) + resp, err := s.client.Do(req, gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// DeleteGroupHook removes a hook from a group. This is an idempotent +// method and can be called multiple times. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-group-hook +func (s *GroupsService) DeleteGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// TriggerTestGroupHook triggers a test hook for a specified group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_webhooks.html#trigger-a-test-group-hook +func (s *GroupsService) TriggerTestGroupHook(pid interface{}, hook int, trigger GroupHookTrigger, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/test/%s", PathEscape(group), hook, trigger) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SetGroupCustomHeader creates or updates a group custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#set-a-custom-header +func (s *GroupsService) SetGroupCustomHeader(gid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteGroupCustomHeader deletes a group custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-a-custom-header +func (s *GroupsService) DeleteGroupCustomHeader(gid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_import_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_import_export.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_import_export.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_iterations.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_iterations.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_iterations.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_labels.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go similarity index 82% rename from vendor/github.com/xanzy/go-gitlab/group_labels.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go index 5a390269..8004bb2d 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_labels.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_labels.go @@ -79,12 +79,12 @@ func (s *GroupLabelsService) ListGroupLabels(gid interface{}, opt *ListGroupLabe // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#get-a-single-group-label -func (s *GroupLabelsService) GetGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { +func (s *GroupLabelsService) GetGroupLabel(gid interface{}, lid interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, nil, err } @@ -108,7 +108,12 @@ func (s *GroupLabelsService) GetGroupLabel(gid 
interface{}, labelID interface{}, // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#create-a-new-group-label -type CreateGroupLabelOptions CreateLabelOptions +type CreateGroupLabelOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Priority *int `url:"priority,omitempty" json:"priority,omitempty"` +} // CreateGroupLabel creates a new label for given group with given name and // color. @@ -140,7 +145,9 @@ func (s *GroupLabelsService) CreateGroupLabel(gid interface{}, opt *CreateGroupL // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#delete-a-group-label -type DeleteGroupLabelOptions DeleteLabelOptions +type DeleteGroupLabelOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` +} // DeleteGroupLabel deletes a group label given by its name or ID. // @@ -173,20 +180,34 @@ func (s *GroupLabelsService) DeleteGroupLabel(gid interface{}, lid interface{}, // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label -type UpdateGroupLabelOptions UpdateLabelOptions +type UpdateGroupLabelOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Priority *int `url:"priority,omitempty" json:"priority,omitempty"` +} // UpdateGroupLabel updates an existing label with a new name or new color. At least // one parameter is required to update the label. // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label -func (s *GroupLabelsService) UpdateGroupLabel(gid interface{}, opt *UpdateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { +func (s *GroupLabelsService) UpdateGroupLabel(gid interface{}, lid interface{}, opt *UpdateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) + if lid != nil { + label, err := parseID(lid) + if err != nil { + return nil, nil, err + } + u = fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) + } + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { return nil, nil, err @@ -207,12 +228,12 @@ func (s *GroupLabelsService) UpdateGroupLabel(gid interface{}, opt *UpdateGroupL // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#subscribe-to-a-group-label -func (s *GroupLabelsService) SubscribeToGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { +func (s *GroupLabelsService) SubscribeToGroupLabel(gid interface{}, lid interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, nil, err } @@ -238,12 +259,12 @@ func (s *GroupLabelsService) SubscribeToGroupLabel(gid interface{}, labelID inte // // GitLab API docs: // https://docs.gitlab.com/ee/api/group_labels.html#unsubscribe-from-a-group-label -func (s *GroupLabelsService) UnsubscribeFromGroupLabel(gid
interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { +func (s *GroupLabelsService) UnsubscribeFromGroupLabel(gid interface{}, lid interface{}, options ...RequestOptionFunc) (*Response, error) { group, err := parseID(gid) if err != nil { return nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go similarity index 79% rename from vendor/github.com/xanzy/go-gitlab/group_members.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_members.go index cc7bbd6e..109b2056 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_members.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_members.go @@ -30,17 +30,6 @@ type GroupMembersService struct { client *Client } -// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -// Gitlab MR for API change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20357 -// Gitlab MR for API Doc change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25652 -type GroupMemberSAMLIdentity struct { - ExternUID string `json:"extern_uid"` - Provider string `json:"provider"` - SAMLProviderID int `json:"saml_provider_id"` -} - // GroupMember represents a GitLab group member. // // GitLab API docs: https://docs.gitlab.com/ee/api/members.html @@ -59,6 +48,50 @@ type GroupMember struct { MemberRole *MemberRole `json:"member_role"` } +// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project +type GroupMemberSAMLIdentity struct { + ExternUID string `json:"extern_uid"` + Provider string `json:"provider"` + SAMLProviderID int `json:"saml_provider_id"` +} + +// BillableGroupMember represents a GitLab billable group member. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group +type BillableGroupMember struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + Email string `json:"email"` + LastActivityOn *ISOTime `json:"last_activity_on"` + MembershipType string `json:"membership_type"` + Removable bool `json:"removable"` + CreatedAt *time.Time `json:"created_at"` + IsLastOwner bool `json:"is_last_owner"` + LastLoginAt *time.Time `json:"last_login_at"` +} + +// BillableUserMembership represents a Membership of a billable user of a group +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-memberships-for-a-billable-member-of-a-group +type BillableUserMembership struct { + ID int `json:"id"` + SourceID int `json:"source_id"` + SourceFullName string `json:"source_full_name"` + SourceMembersURL string `json:"source_members_url"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *time.Time `json:"expires_at"` + AccessLevel *AccessLevelDetails `json:"access_level"` +} + // ListGroupMembersOptions represents the available ListGroupMembers() and // ListAllGroupMembers() options. 
// @@ -128,6 +161,7 @@ func (s *GroupsService) ListAllGroupMembers(gid interface{}, opt *ListGroupMembe // https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project type AddGroupMemberOptions struct { UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` @@ -158,26 +192,34 @@ func (s *GroupMembersService) GetGroupMember(gid interface{}, user int, options return gm, resp, nil } -// BillableGroupMember represents a GitLab billable group member. +// GetInheritedGroupMember gets a member of a group or project, including +// inherited and invited members. // -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group -type BillableGroupMember struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - Email string `json:"email"` - LastActivityOn *ISOTime `json:"last_activity_on"` - MembershipType string `json:"membership_type"` - Removable bool `json:"removable"` - CreatedAt *time.Time `json:"created_at"` - IsLastOwner bool `json:"is_last_owner"` - LastLoginAt *time.Time `json:"last_login_at"` +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members +func (s *GroupMembersService) GetInheritedGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/members/all/%d", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gm := new(GroupMember) + resp, err := s.client.Do(req, gm) + if err != nil { + return nil, resp, err + } + + return gm, resp, err } -// ListBillableGroupMembersOptions represents the available ListBillableGroupMembers() options. +// ListBillableGroupMembersOptions represents the available +// ListBillableGroupMembers() options. // // GitLab API docs: // https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group @@ -213,6 +255,39 @@ func (s *GroupsService) ListBillableGroupMembers(gid interface{}, opt *ListBilla return bgm, resp, nil } +// ListMembershipsForBillableGroupMemberOptions represents the available +// ListMembershipsForBillableGroupMember() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-memberships-for-a-billable-member-of-a-group +type ListMembershipsForBillableGroupMemberOptions = ListOptions + +// ListMembershipsForBillableGroupMember gets a list of memberships for a +// billable member of a group.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-memberships-for-a-billable-member-of-a-group +func (s *GroupsService) ListMembershipsForBillableGroupMember(gid interface{}, user int, opt *ListMembershipsForBillableGroupMemberOptions, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/billable_members/%d/memberships", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var bum []*BillableUserMembership + resp, err := s.client.Do(req, &bum) + if err != nil { + return nil, resp, err + } + + return bum, resp, nil +} + // RemoveBillableGroupMember removes a given group member that counts as billable. // // GitLab API docs: @@ -339,7 +414,8 @@ func (s *GroupMembersService) EditGroupMember(gid interface{}, user int, opt *Ed // RemoveGroupMemberOptions represents the available options to remove a group member. // -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project type RemoveGroupMemberOptions struct { SkipSubresources *bool `url:"skip_subresources,omitempty" json:"skip_subresources,omitempty"` UnassignIssuables *bool `url:"unassign_issuables,omitempty" json:"unassign_issuables,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/group_milestones.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go similarity index 89% rename from vendor/github.com/xanzy/go-gitlab/group_milestones.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go index 179d0594..f3089b21 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_milestones.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_milestones.go @@ -58,11 +58,19 @@ func (m GroupMilestone) String() string { // https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones type ListGroupMilestonesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + SearchTitle *string `url:"search_title,omitempty" json:"search_title,omitempty"` + IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` + IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` + IncludeDescendents *bool `url:"include_descendents,omitempty" json:"include_descendents,omitempty"` + UpdatedBefore *ISOTime `url:"updated_before,omitempty" json:"updated_before,omitempty"` + UpdatedAfter *ISOTime `url:"updated_after,omitempty" json:"updated_after,omitempty"` + ContainingDate *ISOTime `url:"containing_date,omitempty" json:"containing_date,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + EndDate *ISOTime
`url:"end_date,omitempty" json:"end_date,omitempty"` } // ListGroupMilestones returns a list of group milestones. diff --git a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_protected_environments.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_protected_environments.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_repository_storage_move.go diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go new file mode 100644 index 00000000..c73b67dc --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_security_settings.go @@ -0,0 +1,82 @@ +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// GroupSecuritySettingsService handles communication with the Group Security Settings +// related methods of the GitLab API. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/group_security_settings.html +type GroupSecuritySettingsService struct { + client *Client +} + +// GroupSecuritySettings represents the group security settings data. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/group_security_settings.html +type GroupSecuritySettings struct { + SecretPushProtectionEnabled bool `json:"secret_push_protection_enabled"` + Errors []string `json:"errors"` +} + +// Gets a string representation of the GroupSecuritySettings data. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_security_settings.html +func (s GroupSecuritySettings) String() string { + return Stringify(s) +} + +// GetGroupSecuritySettingsOptions represent the request options for updating +// the group security settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_security_settings.html#update-secret_push_protection_enabled-setting +type UpdateGroupSecuritySettingsOptions struct { + SecretPushProtectionEnabled *bool `url:"secret_push_protection_enabled,omitempty" json:"secret_push_protection_enabled,omitempty"` + ProjectsToExclude *[]int `url:"projects_to_exclude,omitempty" json:"projects_to_exclude,omitempty"` +} + +// UpdateSecretPushProtectionEnabledSetting updates the secret_push_protection_enabled +// setting for the all projects in a group to the provided value. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_security_settings.html#update-secret_push_protection_enabled-setting +func (s *GroupSecuritySettingsService) UpdateSecretPushProtectionEnabledSetting(gid interface{}, opt UpdateGroupSecuritySettingsOptions, options ...RequestOptionFunc) (*GroupSecuritySettings, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/security_settings", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + settings := new(GroupSecuritySettings) + resp, err := s.client.Do(req, &settings) + if err != nil { + return nil, resp, err + } + + return settings, resp, err +} diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go new file mode 100644 index 00000000..feba6c8b --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_serviceaccounts.go @@ -0,0 +1,190 @@ +// +// Copyright 2023, James Hong +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// GroupServiceAccount represents a GitLab service account user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user +type GroupServiceAccount struct { + ID int `json:"id"` + Name string `json:"name"` + UserName string `json:"username"` +} + +// ListServiceAccountsOptions represents the available ListServiceAccounts() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +type ListServiceAccountsOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// ListServiceAccounts gets a list of service accounts. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var sa []*GroupServiceAccount + resp, err := s.client.Do(req, &sa) + if err != nil { + return nil, resp, err + } + + return sa, resp, nil +} + +// CreateServiceAccountOptions represents the available CreateServiceAccount() options.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user +type CreateServiceAccountOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} + +// CreateServiceAccount creates a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user +func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + sa := new(GroupServiceAccount) + resp, err := s.client.Do(req, sa) + if err != nil { + return nil, resp, err + } + + return sa, resp, nil +} + +// CreateServiceAccountPersonalAccessTokenOptions represents the available +// CreateServiceAccountPersonalAccessToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user +type CreateServiceAccountPersonalAccessTokenOptions struct { + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// CreateServiceAccountPersonalAccessToken adds a new Personal Access Token for a +// service account user for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user +func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pat := new(PersonalAccessToken) + resp, err := s.client.Do(req, pat) + if err != nil { + return nil, resp, err + } + + return pat, resp, nil +} + +// RotateServiceAccountPersonalAccessTokenOptions represents the available RotateServiceAccountPersonalAccessToken() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_service_accounts.html#rotate-a-personal-access-token-for-a-service-account-user +type RotateServiceAccountPersonalAccessTokenOptions struct { + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// RotateServiceAccountPersonalAccessToken rotates a Personal Access Token for a +// service account user for a group.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#rotate-a-personal-access-token-for-a-service-account-user +func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount, token int, opt *RotateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", PathEscape(group), serviceAccount, token) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pat := new(PersonalAccessToken) + resp, err := s.client.Do(req, pat) + if err != nil { + return nil, resp, err + } + + return pat, resp, nil +} + +// DeleteServiceAccount deletes a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user +func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_ssh_certificates.go diff --git a/vendor/github.com/xanzy/go-gitlab/group_variables.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go similarity index 91% rename from vendor/github.com/xanzy/go-gitlab/group_variables.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go index aa253184..86c7d8bf 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_variables.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/group_variables.go @@ -41,6 +41,7 @@ type GroupVariable struct { VariableType VariableTypeValue `json:"variable_type"` Protected bool `json:"protected"` Masked bool `json:"masked"` + Hidden bool `json:"hidden"` Raw bool `json:"raw"` EnvironmentScope string `json:"environment_scope"` Description string `json:"description"` @@ -82,18 +83,27 @@ func (s *GroupVariablesService) ListVariables(gid interface{}, opt *ListGroupVar return vs, resp, nil } +// GetGroupVariableOptions represents the available GetVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details +type GetGroupVariableOptions struct { + Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` +} + // GetVariable gets a variable.
// // GitLab API docs: // https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details -func (s *GroupVariablesService) GetVariable(gid interface{}, key string, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { +func (s *GroupVariablesService) GetVariable(gid interface{}, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { return nil, nil, err } @@ -118,6 +128,7 @@ type CreateGroupVariableOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + MaskedAndHidden *bool `url:"masked_and_hidden,omitempty" json:"masked_and_hidden,omitempty"` Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/group_wikis.go b/vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/group_wikis.go rename to vendor/gitlab.com/gitlab-org/api/client-go/group_wikis.go diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/gitlab.com/gitlab-org/api/client-go/groups.go similarity index 75% rename from vendor/github.com/xanzy/go-gitlab/groups.go rename to vendor/gitlab.com/gitlab-org/api/client-go/groups.go index a62750be..dfbced97 100644 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/groups.go @@ -39,42 +39,44 @@ type GroupsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html type Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - MembershipLock bool `json:"membership_lock"` - Visibility VisibilityValue `json:"visibility"` - LFSEnabled bool `json:"lfs_enabled"` - DefaultBranchProtection int `json:"default_branch_protection"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - RequestAccessEnabled bool `json:"request_access_enabled"` - RepositoryStorage string `json:"repository_storage"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - FileTemplateProjectID int `json:"file_template_project_id"` - ParentID int `json:"parent_id"` - Projects []*Project `json:"projects"` - Statistics *Statistics `json:"statistics"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ShareWithGroupLock bool `json:"share_with_group_lock"` - RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` - EmailsEnabled bool `json:"emails_enabled"` - MentionsDisabled bool `json:"mentions_disabled"` - RunnersToken string `json:"runners_token"` - SharedProjects []*Project 
`json:"shared_projects"` - SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` - SharedWithGroups []struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + MembershipLock bool `json:"membership_lock"` + Visibility VisibilityValue `json:"visibility"` + LFSEnabled bool `json:"lfs_enabled"` + DefaultBranch string `json:"default_branch"` + DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + RequestAccessEnabled bool `json:"request_access_enabled"` + RepositoryStorage string `json:"repository_storage"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + FileTemplateProjectID int `json:"file_template_project_id"` + ParentID int `json:"parent_id"` + Projects []*Project `json:"projects"` + Statistics *Statistics `json:"statistics"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ShareWithGroupLock bool `json:"share_with_group_lock"` + RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` + EmailsEnabled bool `json:"emails_enabled"` + MentionsDisabled bool `json:"mentions_disabled"` + RunnersToken string `json:"runners_token"` + SharedProjects []*Project `json:"shared_projects"` + SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` + SharedWithGroups []struct { GroupID int `json:"group_id"` GroupName string `json:"group_name"` GroupFullPath string `json:"group_full_path"` GroupAccessLevel int `json:"group_access_level"` ExpiresAt *ISOTime `json:"expires_at"` + MemberRoleID int `json:"member_role_id"` } `json:"shared_with_groups"` LDAPCN string `json:"ldap_cn"` LDAPAccess AccessLevelValue `json:"ldap_access"` @@ -86,10 +88,33 @@ type Group struct { MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` CreatedAt *time.Time `json:"created_at"` IPRestrictionRanges string `json:"ip_restriction_ranges"` + AllowedEmailDomainsList string `json:"allowed_email_domains_list"` WikiAccessLevel AccessControlValue `json:"wiki_access_level"` // Deprecated: Use EmailsEnabled instead EmailsDisabled bool `json:"emails_disabled"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection int `json:"default_branch_protection"` +} + +// BranchProtectionDefaults represents default Git protected branch permissions. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type BranchProtectionDefaults struct { + AllowedToPush []*GroupAccessLevel `json:"allowed_to_push,omitempty"` + AllowForcePush bool `json:"allow_force_push,omitempty"` + AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` +} + +// GroupAccessLevel represents default branch protection defaults access levels. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type GroupAccessLevel struct { + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` } // GroupAvatar represents a GitLab group avatar. @@ -123,8 +148,9 @@ type LDAPGroupLink struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#saml-group-links type SAMLGroupLink struct { - Name string `json:"name"` - AccessLevel AccessLevelValue `json:"access_level"` + Name string `json:"name"` + AccessLevel AccessLevelValue `json:"access_level"` + MemberRoleID int `json:"member_role_id,omitempty"` } // ListGroupsOptions represents the available ListGroups() options. @@ -338,31 +364,46 @@ func (s *GroupsService) DownloadAvatar(gid interface{}, options ...RequestOption // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group type CreateGroupOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Avatar *GroupAvatar `url:"-" json:"-"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` - RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` - EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` - MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` - IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Avatar *GroupAvatar `url:"-" json:"-"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` + 
Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` + RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` // Deprecated: Use EmailsEnabled instead EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` +} + +// DefaultBranchProtectionDefaultsOptions represents the available options for +// using default_branch_protection_defaults in CreateGroup() or UpdateGroup(). +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type DefaultBranchProtectionDefaultsOptions struct { + AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } // CreateGroup creates a new project group.
Available only for users who can @@ -467,34 +508,39 @@ func (s *GroupsService) TransferSubGroup(gid interface{}, opt *TransferSubGroupO // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group type UpdateGroupOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` - RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` - EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` - MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` - PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` - SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` - PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` - IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` + 
RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` + SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` + PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` + IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` + AllowedEmailDomainsList *string `url:"allowed_email_domains_list,omitempty" json:"allowed_email_domains_list,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` // Deprecated: Use EmailsEnabled instead EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` } // UpdateGroup updates an existing group; only available to group owners and @@ -880,6 +926,7 @@ func (s *GroupsService) GetGroupSAMLLink(gid interface{}, samlGroupName string, type AddGroupSAMLLinkOptions struct { SAMLGroupName *string `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // AddGroupSAMLLink creates a new group SAML link. 
Available only for users who @@ -933,9 +980,10 @@ func (s *GroupsService) DeleteGroupSAMLLink(gid interface{}, samlGroupName strin // GitLab API docs: // https://docs.gitlab.com/ee/api/groups.html#share-groups-with-groups type ShareGroupWithGroupOptions struct { - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // ShareGroupWithGroup shares a group with another group. @@ -999,7 +1047,9 @@ type GroupPushRules struct { FileNameRegex string `json:"file_name_regex"` MaxFileSize int `json:"max_file_size"` CommitCommitterCheck bool `json:"commit_committer_check"` + CommitCommitterNameCheck bool `json:"commit_committer_name_check"` RejectUnsignedCommits bool `json:"reject_unsigned_commits"` + RejectNonDCOCommits bool `json:"reject_non_dco_commits"` } // GetGroupPushRules gets the push rules of a group. @@ -1036,6 +1086,7 @@ type AddGroupPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1044,6 +1095,7 @@ type AddGroupPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // AddGroupPushRule adds push rules to the specified group. 
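A hedged sketch (not part of the patch) of the new group push-rule fields added in these hunks, CommitCommitterNameCheck and RejectNonDCOCommits. The group ID is a placeholder, it reuses the imports from the sketch above, and it assumes the existing AddGroupPushRule signature named in this hunk.

func enforceCommitPolicy(client *gitlab.Client) error {
	// Require the committer identity to match the GitLab account and reject
	// commits that lack a DCO Signed-off-by trailer (both new in this diff).
	rules, _, err := client.Groups.AddGroupPushRule(42, &gitlab.AddGroupPushRuleOptions{
		CommitCommitterCheck:     gitlab.Ptr(true),
		CommitCommitterNameCheck: gitlab.Ptr(true),
		RejectNonDCOCommits:      gitlab.Ptr(true),
	})
	if err != nil {
		return err
	}
	log.Printf("reject_non_dco_commits=%v", rules.RejectNonDCOCommits)
	return nil
}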
@@ -1080,6 +1132,7 @@ type EditGroupPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1088,6 +1141,7 @@ type EditGroupPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // EditGroupPushRule edits a push rule for a specified group. diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/import.go b/vendor/gitlab.com/gitlab-org/api/client-go/import.go new file mode 100644 index 00000000..a8164a70 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/import.go @@ -0,0 +1,266 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "net/http" +) + +// ImportService handles communication with the import +// related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html +type ImportService struct { + client *Client +} + +// GitHubImport represents the response from an import from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type GitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s GitHubImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromGitHubOptions represents the available +// ImportRepositoryFromGitHub() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type ImportRepositoryFromGitHubOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + RepoID *int `url:"repo_id,omitempty" json:"repo_id,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` + OptionalStages struct { + SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` + AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` + CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` + } `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitHubOptions, options ...RequestOptionFunc) (*GitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github", opt, options) + if err != nil { + return nil, nil, err + } + + gi := new(GitHubImport) + resp, err := s.client.Do(req, gi) + if err != nil { + return nil, resp, err + } + + return gi, resp, nil +} + +// CancelledGitHubImport represents the response when canceling +// an import from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelledGitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` +} + +func (s CancelledGitHubImport) String() string { + return Stringify(s) +} + +// CancelGitHubProjectImportOptions represents the available +// CancelGitHubProjectImport() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelGitHubProjectImportOptions struct { + ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` +} + +// Cancel an import of a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +func (s *ImportService) CancelGitHubProjectImport(opt *CancelGitHubProjectImportOptions, options ...RequestOptionFunc) (*CancelledGitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/cancel", opt, options) + if err != nil { + return nil, nil, err + } + + cgi := new(CancelledGitHubImport) + resp, err := s.client.Do(req, cgi) + if err != nil { + return nil, resp, err + } + + return cgi, resp, nil +} + +// ImportGitHubGistsIntoGitLabSnippetsOptions represents the available +// ImportGitHubGistsIntoGitLabSnippets() options. 
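Assuming the new ImportService is wired into the client as client.Import, in line with the other services, a hedged sketch (not part of the patch) of kicking off the GitHub import defined above. The GitHub token, repository ID, and target namespace are placeholders; imports are reused from the first sketch.

func importFromGitHub(client *gitlab.Client) error {
	gi, _, err := client.Import.ImportRepositoryFromGitHub(&gitlab.ImportRepositoryFromGitHubOptions{
		PersonalAccessToken: gitlab.Ptr("ghp-example"), // placeholder GitHub PAT
		RepoID:              gitlab.Ptr(12345),         // placeholder GitHub repository ID
		TargetNamespace:     gitlab.Ptr("my-group"),    // placeholder GitLab namespace
	})
	if err != nil {
		return err
	}
	log.Printf("import status: %s", gi.ImportStatus)
	return nil
}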
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +type ImportGitHubGistsIntoGitLabSnippetsOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` +} + +// Import personal GitHub Gists into personal GitLab Snippets. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGistsIntoGitLabSnippetsOptions, options ...RequestOptionFunc) (*Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/gists", opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// BitbucketServerImport represents the response from an import from Bitbucket +// Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type BitbucketServerImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` +} + +func (s BitbucketServerImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketServerOptions represents the available ImportRepositoryFromBitbucketServer() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type ImportRepositoryFromBitbucketServerOptions struct { + BitbucketServerUrl *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` + BitbucketServerUsername *string `url:"bitbucket_server_username,omitempty" json:"bitbucket_server_username,omitempty"` + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + BitbucketServerProject *string `url:"bitbucket_server_project,omitempty" json:"bitbucket_server_project,omitempty"` + BitbucketServerRepo *string `url:"bitbucket_server_repo,omitempty" json:"bitbucket_server_repo,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + NewNamespace *string `url:"new_namespace,omitempty" json:"new_namespace,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from Bitbucket Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositoryFromBitbucketServerOptions, options ...RequestOptionFunc) (*BitbucketServerImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket_server", opt, options) + if err != nil { + return nil, nil, err + } + + bsi := new(BitbucketServerImport) + resp, err := s.client.Do(req, bsi) + if err != nil { + return nil, resp, err + } + + return bsi, resp, nil +} + +// BitbucketCloudImport represents the response from an import from Bitbucket +// Cloud. 
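The Bitbucket Server variant defined above takes the same shape; a matching hedged sketch with placeholder server URL, credentials, and project/repo keys, again reusing the earlier imports and the assumed client.Import field.

func importFromBitbucketServer(client *gitlab.Client) error {
	bsi, _, err := client.Import.ImportRepositoryFromBitbucketServer(&gitlab.ImportRepositoryFromBitbucketServerOptions{
		BitbucketServerUrl:      gitlab.Ptr("https://bitbucket.example.com"), // placeholder
		BitbucketServerUsername: gitlab.Ptr("reporter"),                      // placeholder
		PersonalAccessToken:     gitlab.Ptr("bbs-token"),                     // placeholder
		BitbucketServerProject:  gitlab.Ptr("PROJ"),                          // placeholder project key
		BitbucketServerRepo:     gitlab.Ptr("my-repo"),                       // placeholder repo slug
	})
	if err != nil {
		return err
	}
	log.Printf("queued import of %s", bsi.FullPath)
	return nil
}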
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type BitbucketCloudImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s BitbucketCloudImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketCloudOptions represents the available +// ImportRepositoryFromBitbucketCloud() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type ImportRepositoryFromBitbucketCloudOptions struct { + BitbucketUsername *string `url:"bitbucket_username,omitempty" json:"bitbucket_username,omitempty"` + BitbucketAppPassword *string `url:"bitbucket_app_password,omitempty" json:"bitbucket_app_password,omitempty"` + RepoPath *string `url:"repo_path,omitempty" json:"repo_path,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` +} + +// Import a repository from Bitbucket Cloud. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +func (s *ImportService) ImportRepositoryFromBitbucketCloud(opt *ImportRepositoryFromBitbucketCloudOptions, options ...RequestOptionFunc) (*BitbucketCloudImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket", opt, options) + if err != nil { + return nil, nil, err + } + + bci := new(BitbucketCloudImport) + resp, err := s.client.Do(req, bci) + if err != nil { + return nil, resp, err + } + + return bci, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/instance_clusters.go rename to vendor/gitlab.com/gitlab-org/api/client-go/instance_clusters.go diff --git a/vendor/github.com/xanzy/go-gitlab/instance_variables.go b/vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/instance_variables.go rename to vendor/gitlab.com/gitlab-org/api/client-go/instance_variables.go diff --git a/vendor/github.com/xanzy/go-gitlab/invites.go b/vendor/gitlab.com/gitlab-org/api/client-go/invites.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/invites.go rename to vendor/gitlab.com/gitlab-org/api/client-go/invites.go diff --git a/vendor/github.com/xanzy/go-gitlab/issue_links.go b/vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/issue_links.go rename to vendor/gitlab.com/gitlab-org/api/client-go/issue_links.go diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/gitlab.com/gitlab-org/api/client-go/issues.go similarity index 85% rename from vendor/github.com/xanzy/go-gitlab/issues.go rename to vendor/gitlab.com/gitlab-org/api/client-go/issues.go index eecccc47..aaa4e850 100644 --- a/vendor/github.com/xanzy/go-gitlab/issues.go +++ 
b/vendor/gitlab.com/gitlab-org/api/client-go/issues.go @@ -120,6 +120,7 @@ type Issue struct { Epic *Epic `json:"epic"` Iteration *GroupIteration `json:"iteration"` TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` + ServiceDeskReplyTo string `json:"service_desk_reply_to"` } func (i Issue) String() string { @@ -246,29 +247,29 @@ type ListGroupIssuesOptions struct { NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` Scope *string `url:"scope,omitempty" json:"scope,omitempty"` AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + NotAuthorID *int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - NotSearch *string `url:"not[search],omitempty" json:"not[search],omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + NotSearch *string `url:"not[search],omitempty" json:"not[search],omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` + CreatedAfter 
*time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListGroupIssues gets a list of group issues. This function accepts @@ -301,37 +302,37 @@ func (s *IssuesService) ListGroupIssues(pid interface{}, opt *ListGroupIssuesOpt // GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-project-issues type ListProjectIssuesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" 
json:"state,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + NotAuthorID *int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListProjectIssues gets a list of project issues. 
This function accepts diff --git a/vendor/github.com/xanzy/go-gitlab/issues_statistics.go b/vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/issues_statistics.go rename to vendor/gitlab.com/gitlab-org/api/client-go/issues_statistics.go diff --git a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go b/vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/job_token_scope.go rename to vendor/gitlab.com/gitlab-org/api/client-go/job_token_scope.go diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go similarity index 98% rename from vendor/github.com/xanzy/go-gitlab/jobs.go rename to vendor/gitlab.com/gitlab-org/api/client-go/jobs.go index 9ad00665..f25c020f 100644 --- a/vendor/github.com/xanzy/go-gitlab/jobs.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/jobs.go @@ -516,9 +516,9 @@ type PlayJobOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/jobs.html#run-a-job type JobVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // PlayJob triggers a manual action to start a job. diff --git a/vendor/github.com/xanzy/go-gitlab/keys.go b/vendor/gitlab.com/gitlab-org/api/client-go/keys.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/keys.go rename to vendor/gitlab.com/gitlab-org/api/client-go/keys.go diff --git a/vendor/github.com/xanzy/go-gitlab/labels.go b/vendor/gitlab.com/gitlab-org/api/client-go/labels.go similarity index 90% rename from vendor/github.com/xanzy/go-gitlab/labels.go rename to vendor/gitlab.com/gitlab-org/api/client-go/labels.go index d36e85b0..bc73669e 100644 --- a/vendor/github.com/xanzy/go-gitlab/labels.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/labels.go @@ -108,12 +108,12 @@ func (s *LabelsService) ListLabels(pid interface{}, opt *ListLabelsOptions, opti // GetLabel get a single label for a given project. // // GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#get-a-single-project-label -func (s *LabelsService) GetLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) { +func (s *LabelsService) GetLabel(pid interface{}, lid interface{}, options ...RequestOptionFunc) (*Label, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, nil, err } @@ -216,13 +216,21 @@ type UpdateLabelOptions struct { // one parameter is required, to update the label. 
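The hunk below changes UpdateLabel's signature: it now takes a label ID or name (lid) and, when lid is non-nil, targets the projects/:id/labels/:label_id path instead of naming the label in the request body. A hedged sketch of the new call shape (not part of the patch); the project ID and label identifiers are placeholders.

func renameLabel(client *gitlab.Client) error {
	// lid can be the numeric label ID or the current name; passing nil keeps
	// the previous behaviour of identifying the label via the options body.
	label, _, err := client.Labels.UpdateLabel(123, "bug", &gitlab.UpdateLabelOptions{
		NewName: gitlab.Ptr("defect"),
		Color:   gitlab.Ptr("#FF0000"),
	})
	if err != nil {
		return err
	}
	log.Printf("label renamed to %s", label.Name)
	return nil
}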
// // GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#edit-an-existing-label -func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) { +func (s *LabelsService) UpdateLabel(pid interface{}, lid interface{}, opt *UpdateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) + if lid != nil { + label, err := parseID(lid) + if err != nil { + return nil, nil, err + } + u = fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label)) + } + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { return nil, nil, err @@ -243,12 +251,12 @@ func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, op // // GitLab API docs: // https://docs.gitlab.com/ee/api/labels.html#subscribe-to-a-label -func (s *LabelsService) SubscribeToLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) { +func (s *LabelsService) SubscribeToLabel(pid interface{}, lid interface{}, options ...RequestOptionFunc) (*Label, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, nil, err } @@ -274,12 +282,12 @@ func (s *LabelsService) SubscribeToLabel(pid interface{}, labelID interface{}, o // // GitLab API docs: // https://docs.gitlab.com/ee/api/labels.html#unsubscribe-from-a-label -func (s *LabelsService) UnsubscribeFromLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { +func (s *LabelsService) UnsubscribeFromLabel(pid interface{}, lid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, err } @@ -297,12 +305,12 @@ func (s *LabelsService) UnsubscribeFromLabel(pid interface{}, labelID interface{ // // GitLab API docs: // https://docs.gitlab.com/ee/api/labels.html#promote-a-project-label-to-a-group-label -func (s *LabelsService) PromoteLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { +func (s *LabelsService) PromoteLabel(pid interface{}, lid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } - label, err := parseID(labelID) + label, err := parseID(lid) if err != nil { return nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/license.go b/vendor/gitlab.com/gitlab-org/api/client-go/license.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/license.go rename to vendor/gitlab.com/gitlab-org/api/client-go/license.go diff --git a/vendor/github.com/xanzy/go-gitlab/license_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/license_templates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/license_templates.go diff --git a/vendor/github.com/xanzy/go-gitlab/markdown.go b/vendor/gitlab.com/gitlab-org/api/client-go/markdown.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/markdown.go rename to vendor/gitlab.com/gitlab-org/api/client-go/markdown.go diff --git 
a/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go b/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go new file mode 100644 index 00000000..4d791a91 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/member_roles.go @@ -0,0 +1,144 @@ +package gitlab + +import ( + "fmt" + "net/http" +) + +// MemberRolesService handles communication with the member roles related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html +type MemberRolesService struct { + client *Client +} + +// MemberRole represents a GitLab member role. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html +type MemberRole struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + GroupID int `json:"group_id"` + BaseAccessLevel AccessLevelValue `json:"base_access_level"` + AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework bool `json:"admin_compliance_framework,omitempty"` + AdminGroupMembers bool `json:"admin_group_member,omitempty"` + AdminMergeRequests bool `json:"admin_merge_request,omitempty"` + AdminPushRules bool `json:"admin_push_rules,omitempty"` + AdminTerraformState bool `json:"admin_terraform_state,omitempty"` + AdminVulnerability bool `json:"admin_vulnerability,omitempty"` + AdminWebHook bool `json:"admin_web_hook,omitempty"` + ArchiveProject bool `json:"archive_project,omitempty"` + ManageDeployTokens bool `json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken bool `json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings bool `json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken bool `json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink bool `json:"manage_security_policy_link,omitempty"` + ReadCode bool `json:"read_code,omitempty"` + ReadRunners bool `json:"read_runners,omitempty"` + ReadDependency bool `json:"read_dependency,omitempty"` + ReadVulnerability bool `json:"read_vulnerability,omitempty"` + RemoveGroup bool `json:"remove_group,omitempty"` + RemoveProject bool `json:"remove_project,omitempty"` +} + +// ListMemberRoles gets a list of member roles for a specified group. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#list-all-member-roles-of-a-group +func (s *MemberRolesService) ListMemberRoles(gid interface{}, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var mrs []*MemberRole + resp, err := s.client.Do(req, &mrs) + if err != nil { + return nil, resp, err + } + + return mrs, resp, nil +} + +// CreateMemberRoleOptions represents the available CreateMemberRole() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group +type CreateMemberRoleOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + AdminCICDVariables *bool `url:"admin_cicd_variables" json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework *bool `url:"admin_compliance_framework" json:"admin_compliance_framework,omitempty"` + AdminGroupMembers *bool `url:"admin_group_member" json:"admin_group_member,omitempty"` + AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` + AdminPushRules *bool `url:"admin_push_rules" json:"admin_push_rules,omitempty"` + AdminTerraformState *bool `url:"admin_terraform_state" json:"admin_terraform_state,omitempty"` + AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` + AdminWebHook *bool `url:"admin_web_hook" json:"admin_web_hook,omitempty"` + ArchiveProject *bool `url:"archive_project" json:"archive_project,omitempty"` + ManageDeployTokens *bool `url:"manage_deploy_tokens" json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken *bool `url:"manage_group_access_tokens" json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings *bool `url:"manage_merge_request_settings" json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken *bool `url:"manage_project_access_tokens" json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink *bool `url:"manage_security_policy_link" json:"manage_security_policy_link,omitempty"` + ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` + ReadRunners *bool `url:"read_runners" json:"read_runners,omitempty"` + ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` + ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` + RemoveGroup *bool `url:"remove_group" json:"remove_group,omitempty"` + RemoveProject *bool `url:"remove_project" json:"remove_project,omitempty"` +} + +// CreateMemberRole creates a new member role for a specified group. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group +func (s *MemberRolesService) CreateMemberRole(gid interface{}, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + mr := new(MemberRole) + resp, err := s.client.Do(req, mr) + if err != nil { + return nil, resp, err + } + + return mr, resp, nil +} + +// DeleteMemberRole deletes a member role from a specified group. 
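A hedged sketch (not part of the patch) of creating a custom role with the new service defined above; it assumes the service is exposed on the client as client.MemberRoles, and the group path and permission set are placeholders.

func createGuestCodeReaderRole(client *gitlab.Client) error {
	role, _, err := client.MemberRoles.CreateMemberRole("my-group", &gitlab.CreateMemberRoleOptions{
		Name:            gitlab.Ptr("guest-with-code"),
		BaseAccessLevel: gitlab.Ptr(gitlab.GuestPermissions),
		Description:     gitlab.Ptr("Guest that can also read code"),
		ReadCode:        gitlab.Ptr(true),
	})
	if err != nil {
		return err
	}
	log.Printf("created member role %d", role.ID)
	return nil
}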
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#remove-member-role-of-a-group +func (s *MemberRolesService) DeleteMemberRole(gid interface{}, memberRole int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/member_roles/%d", PathEscape(group), memberRole) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go similarity index 99% rename from vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go rename to vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go index 7eadc82b..d2f1f81f 100644 --- a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go @@ -94,6 +94,7 @@ type MergeRequestApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` SourceRule *ProjectApprovalRule `json:"source_rule"` diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go similarity index 84% rename from vendor/github.com/xanzy/go-gitlab/merge_requests.go rename to vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go index a9e8d2e5..506e3303 100644 --- a/vendor/github.com/xanzy/go-gitlab/merge_requests.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/merge_requests.go @@ -17,6 +17,7 @@ package gitlab import ( + "bytes" "encoding/json" "fmt" "net/http" @@ -533,6 +534,42 @@ func (s *MergeRequestsService) ListMergeRequestDiffs(pid interface{}, mergeReque return m, resp, nil } +// ShowMergeRequestRawDiffsOptions represents the available ShowMergeRequestRawDiffs() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#show-merge-request-raw-diffs +type ShowMergeRequestRawDiffsOptions struct{} + +// ShowMergeRequestRawDiffs Show raw diffs of the files changed in a merge request +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#show-merge-request-raw-diffs +func (s *MergeRequestsService) ShowMergeRequestRawDiffs(pid interface{}, mergeRequest int, opt *ShowMergeRequestRawDiffsOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { + project, err := parseID(pid) + if err != nil { + return []byte{}, nil, err + } + u := fmt.Sprintf( + "projects/%s/merge_requests/%d/raw_diffs", + PathEscape(project), + mergeRequest, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return []byte{}, nil, err + } + + var rd bytes.Buffer + resp, err := s.client.Do(req, &rd) + if err != nil { + return []byte{}, resp, err + } + + return rd.Bytes(), resp, nil +} + // GetMergeRequestParticipants gets a list of merge request participants. // // GitLab API docs: @@ -1078,3 +1115,160 @@ func (s *MergeRequestsService) ResetSpentTime(pid interface{}, mergeRequest int, func (s *MergeRequestsService) GetTimeSpent(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { return s.timeStats.getTimeSpent(pid, "merge_requests", mergeRequest, options...) 
} + +// MergeRequestDependency represents a GitLab merge request dependency. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-merge-request-dependency +type MergeRequestDependency struct { + ID int `json:"id"` + BlockingMergeRequest BlockingMergeRequest `json:"blocking_merge_request"` + ProjectID int `json:"project_id"` +} + +// BlockingMergeRequest represents a GitLab merge request dependency. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-merge-request-dependency +type BlockingMergeRequest struct { + ID int `json:"id"` + Iid int `json:"iid"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + ProjectID int `json:"project_id"` + Title string `json:"title"` + State string `json:"state"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Upvotes int `json:"upvotes"` + Downvotes int `json:"downvotes"` + Author *BasicUser `json:"author"` + Assignee *BasicUser `json:"assignee"` + Assignees []*BasicUser `json:"assignees"` + Reviewers []*BasicUser `json:"reviewers"` + SourceProjectID int `json:"source_project_id"` + TargetProjectID int `json:"target_project_id"` + Labels *LabelOptions `json:"labels"` + Description string `json:"description"` + Draft bool `json:"draft"` + WorkInProgress bool `json:"work_in_progress"` + Milestone *string `json:"milestone"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + DetailedMergeStatus string `json:"detailed_merge_status"` + MergedBy *BasicUser `json:"merged_by"` + MergedAt *time.Time `json:"merged_at"` + ClosedBy *BasicUser `json:"closed_by"` + ClosedAt *time.Time `json:"closed_at"` + Sha string `json:"sha"` + MergeCommitSha string `json:"merge_commit_sha"` + SquashCommitSha string `json:"squash_commit_sha"` + UserNotesCount int `json:"user_notes_count"` + ShouldRemoveSourceBranch *bool `json:"should_remove_source_branch"` + ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` + WebURL string `json:"web_url"` + References *IssueReferences `json:"references"` + DiscussionLocked *bool `json:"discussion_locked"` + TimeStats *TimeStats `json:"time_stats"` + Squash bool `json:"squash"` + ApprovalsBeforeMerge *int `json:"approvals_before_merge"` + Reference string `json:"reference"` + TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` + HasConflicts bool `json:"has_conflicts"` + BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` + MergeStatus string `json:"merge_status"` + MergeUser *BasicUser `json:"merge_user"` + MergeAfter time.Time `json:"merge_after"` + Imported bool `json:"imported"` + ImportedFrom string `json:"imported_from"` + PreparedAt *time.Time `json:"prepared_at"` + SquashOnMerge bool `json:"squash_on_merge"` +} + +func (m MergeRequestDependency) String() string { + return Stringify(m) +} + +// CreateMergeRequestDependencyOptions represents the available CreateMergeRequestDependency() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-merge-request-dependency +type CreateMergeRequestDependencyOptions struct { + BlockingMergeRequestID *int `url:"blocking_merge_request_id,omitempty" json:"blocking_merge_request_id,omitempty"` +} + +// CreateMergeRequestDependency creates a new merge request dependency for a given +// merge request. 
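A hedged sketch (not part of the patch) of the merge request dependency endpoints defined around this point. Note that CreateMergeRequestDependency takes its options struct by value, unlike most of this API surface; project and merge request IDs are placeholders.

func blockMergeRequest(client *gitlab.Client) error {
	// Make MR !12 in project 7 depend on (be blocked by) merge request 100.
	deps, _, err := client.MergeRequests.CreateMergeRequestDependency(7, 12, gitlab.CreateMergeRequestDependencyOptions{
		BlockingMergeRequestID: gitlab.Ptr(100), // placeholder blocking MR ID
	})
	if err != nil {
		return err
	}
	log.Printf("MR now has %d blocking dependencies", len(deps))
	return nil
}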
+
+// CreateMergeRequestDependencyOptions represents the available CreateMergeRequestDependency()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-merge-request-dependency
+type CreateMergeRequestDependencyOptions struct {
+	BlockingMergeRequestID *int `url:"blocking_merge_request_id,omitempty" json:"blocking_merge_request_id,omitempty"`
+}
+
+// CreateMergeRequestDependency creates a new merge request dependency for a given
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-merge-request-dependency
+func (s *MergeRequestsService) CreateMergeRequestDependency(pid interface{}, mergeRequest int, opts CreateMergeRequestDependencyOptions, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mrd []MergeRequestDependency
+	resp, err := s.client.Do(req, &mrd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mrd, resp, nil
+}
+
+// DeleteMergeRequestDependency deletes a merge request dependency for a given
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#delete-a-merge-request-dependency
+func (s *MergeRequestsService) DeleteMergeRequestDependency(pid interface{}, mergeRequest int, blockingMergeRequest int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks/%d", PathEscape(project), mergeRequest, blockingMergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// GetMergeRequestDependencies gets a list of merge request dependencies.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-dependencies
+func (s *MergeRequestsService) GetMergeRequestDependencies(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]MergeRequestDependency, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/blocks", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mrd []MergeRequestDependency
+	resp, err := s.client.Do(req, &mrd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mrd, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_trains.go b/vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/merge_trains.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/merge_trains.go
diff --git a/vendor/github.com/xanzy/go-gitlab/metadata.go b/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go
similarity index 89%
rename from vendor/github.com/xanzy/go-gitlab/metadata.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/metadata.go
index db23a81e..3550c494 100644
--- a/vendor/github.com/xanzy/go-gitlab/metadata.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/metadata.go
@@ -33,9 +33,10 @@ type Metadata struct {
 	Version string `json:"version"`
 	Revision string `json:"revision"`
 	KAS struct {
-		Enabled bool `json:"enabled"`
-		ExternalURL string `json:"externalUrl"`
-		Version string `json:"version"`
+		Enabled bool `json:"enabled"`
+		ExternalURL string `json:"externalUrl"`
+		ExternalK8SProxyURL string `json:"externalK8sProxyUrl"`
+		Version string `json:"version"`
 	} `json:"kas"`
 	Enterprise bool `json:"enterprise"`
 }
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go
b/vendor/gitlab.com/gitlab-org/api/client-go/milestones.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/milestones.go rename to vendor/gitlab.com/gitlab-org/api/client-go/milestones.go diff --git a/vendor/github.com/xanzy/go-gitlab/namespaces.go b/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go similarity index 95% rename from vendor/github.com/xanzy/go-gitlab/namespaces.go rename to vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go index da82a0c5..eaf53867 100644 --- a/vendor/github.com/xanzy/go-gitlab/namespaces.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/namespaces.go @@ -59,8 +59,9 @@ func (n Namespace) String() string { // GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces type ListNamespacesOptions struct { ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` - OwnedOnly *bool `url:"owned_only,omitempty" json:"owned_only,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + OwnedOnly *bool `url:"owned_only,omitempty" json:"owned_only,omitempty"` + TopLevelOnly *bool `url:"top_level_only,omitempty" json:"top_level_only,omitempty"` } // ListNamespaces gets a list of projects accessible by the authenticated user. diff --git a/vendor/github.com/xanzy/go-gitlab/notes.go b/vendor/gitlab.com/gitlab-org/api/client-go/notes.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/notes.go rename to vendor/gitlab.com/gitlab-org/api/client-go/notes.go diff --git a/vendor/github.com/xanzy/go-gitlab/notifications.go b/vendor/gitlab.com/gitlab-org/api/client-go/notifications.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/notifications.go rename to vendor/gitlab.com/gitlab-org/api/client-go/notifications.go diff --git a/vendor/github.com/xanzy/go-gitlab/packages.go b/vendor/gitlab.com/gitlab-org/api/client-go/packages.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/packages.go rename to vendor/gitlab.com/gitlab-org/api/client-go/packages.go diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/pages.go b/vendor/gitlab.com/gitlab-org/api/client-go/pages.go new file mode 100644 index 00000000..7b0f503e --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pages.go @@ -0,0 +1,127 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +type PagesService struct { + client *Client +} + +// Pages represents the Pages of a project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/pages.html +type Pages struct { + URL string `json:"url"` + IsUniqueDomainEnabled bool `json:"is_unique_domain_enabled"` + ForceHTTPS bool `json:"force_https"` + Deployments []*PagesDeployment `json:"deployments"` +} + +// PagesDeployment represents a Pages deployment. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pages.html
+type PagesDeployment struct {
+	CreatedAt time.Time `json:"created_at"`
+	URL string `json:"url"`
+	PathPrefix string `json:"path_prefix"`
+	RootDirectory string `json:"root_directory"`
+}
+
+// UnpublishPages unpublishes Pages for a project. The user must have admin privileges.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages.html#unpublish-pages
+func (s *PagesService) UnpublishPages(gid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	page, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages", PathEscape(page))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// GetPages lists Pages settings for a project. The user must have at least
+// maintainer privileges.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/pages.html#get-pages-settings-for-a-project
+func (s *PagesService) GetPages(gid interface{}, options ...RequestOptionFunc) (*Pages, *Response, error) {
+	project, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pages)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
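The common use is a settings round-trip: read with GetPages, toggle with the UpdatePages call defined just below. A minimal sketch, assuming the client exposes this service as client.Pages (the field name is my assumption; the wiring is not shown in this hunk) and that project 42 has Pages enabled:

	package main

	import (
		"fmt"
		"log"
		"os"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
		if err != nil {
			log.Fatal(err)
		}
		enabled := true
		// Enforce HTTPS and a unique domain for the project's Pages site.
		pages, _, err := client.Pages.UpdatePages(42, gitlab.UpdatePagesOptions{
			PagesUniqueDomainEnabled: &enabled,
			PagesHTTPSOnly:           &enabled,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("Pages served at:", pages.URL)
	}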
+
+// UpdatePagesOptions represents the available UpdatePages() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages.html#update-pages-settings-for-a-project
+type UpdatePagesOptions struct {
+	PagesUniqueDomainEnabled *bool `url:"pages_unique_domain_enabled,omitempty" json:"pages_unique_domain_enabled,omitempty"`
+	PagesHTTPSOnly *bool `url:"pages_https_only,omitempty" json:"pages_https_only,omitempty"`
+}
+
+// UpdatePages updates Pages settings for a project. The user must have
+// administrator privileges.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/pages.html#update-pages-settings-for-a-project
+func (s *PagesService) UpdatePages(pid interface{}, opt UpdatePagesOptions, options ...RequestOptionFunc) (*Pages, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPatch, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pages)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pages_domains.go b/vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/pages_domains.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/pages_domains.go
diff --git a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go
similarity index 62%
rename from vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go
index ba32bd8f..14aee9ee 100644
--- a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/personal_access_tokens.go
@@ -57,7 +57,14 @@ func (p PersonalAccessToken) String() string {
 // https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens
 type ListPersonalAccessTokensOptions struct {
 	ListOptions
-	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
+	CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"`
+	LastUsedAfter *ISOTime `url:"last_used_after,omitempty" json:"last_used_after,omitempty"`
+	LastUsedBefore *ISOTime `url:"last_used_before,omitempty" json:"last_used_before,omitempty"`
+	Revoked *bool `url:"revoked,omitempty" json:"revoked,omitempty"`
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	State *string `url:"state,omitempty" json:"state,omitempty"`
+	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
 }
 
 // ListPersonalAccessTokens gets a list of all personal access tokens.
@@ -83,8 +90,8 @@ func (s *PersonalAccessTokensService) ListPersonalAccessTokens(opt *ListPersonal
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id
-func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(user int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := fmt.Sprintf("personal_access_tokens/%d", user)
+func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := fmt.Sprintf("personal_access_tokens/%d", token)
 	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
 	if err != nil {
 		return nil, nil, err
@@ -129,12 +136,17 @@ type RotatePersonalAccessTokenOptions struct {
 	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
 }
 
-// RotatePersonalAccessToken revokes a token and returns a new token that
+// RotatePersonalAccessToken is a backwards-compat shim for RotatePersonalAccessTokenByID.
+func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	return s.RotatePersonalAccessTokenByID(token, opt, options...)
+}
+
+// RotatePersonalAccessTokenByID revokes a token and returns a new token that
 // expires in one week per default.
 //
 // GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token
-func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-personal-access-token-id
+func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
 	u := fmt.Sprintf("personal_access_tokens/%d/rotate", token)
 
 	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
@@ -151,11 +163,38 @@ func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *
 	return pat, resp, nil
 }
 
-// RevokePersonalAccessToken revokes a personal access token.
+// RotatePersonalAccessTokenSelf revokes the currently authenticated token
+// and returns a new token that expires in one week by default.
 //
 // GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#revoke-a-personal-access-token
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-request-header
+func (s *PersonalAccessTokensService) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := "personal_access_tokens/self/rotate"
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RevokePersonalAccessToken is a backwards-compat shim for RevokePersonalAccessTokenByID.
 func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) {
+	return s.RevokePersonalAccessTokenByID(token, options...)
+}
+
+// RevokePersonalAccessTokenByID revokes a personal access token by its ID.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id-1
+func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) {
 	u := fmt.Sprintf("personal_access_tokens/%d", token)
 
 	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
@@ -165,3 +204,19 @@ func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, optio
 
 	return s.client.Do(req, nil)
 }
+
+// RevokePersonalAccessTokenSelf revokes the currently authenticated
+// personal access token.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header-1 +func (s *PersonalAccessTokensService) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) { + u := "personal_access_tokens/self" + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go similarity index 96% rename from vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go rename to vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go index d7c85df6..51477f21 100644 --- a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_schedules.go @@ -56,6 +56,7 @@ type LastPipeline struct { SHA string `json:"sha"` Ref string `json:"ref"` Status string `json:"status"` + WebURL string `json:"web_url"` } // ListPipelineSchedulesOptions represents the available ListPipelineTriggers() options. @@ -293,9 +294,9 @@ func (s *PipelineSchedulesService) RunPipelineSchedule(pid interface{}, schedule // GitLab API docs: // https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule type CreatePipelineScheduleVariableOptions struct { - Key *string `url:"key" json:"key"` - Value *string `url:"value" json:"value"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Key *string `url:"key" json:"key"` + Value *string `url:"value" json:"value"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // CreatePipelineScheduleVariable creates a pipeline schedule variable. @@ -329,8 +330,8 @@ func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid interface{ // GitLab API docs: // https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable type EditPipelineScheduleVariableOptions struct { - Value *string `url:"value" json:"value"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Value *string `url:"value" json:"value"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // EditPipelineScheduleVariable creates a pipeline schedule variable. diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go rename to vendor/gitlab.com/gitlab-org/api/client-go/pipeline_triggers.go diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go similarity index 83% rename from vendor/github.com/xanzy/go-gitlab/pipelines.go rename to vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go index 6dee9dfe..dd02acc9 100644 --- a/vendor/github.com/xanzy/go-gitlab/pipelines.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/pipelines.go @@ -34,9 +34,9 @@ type PipelinesService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html type PipelineVariable struct { - Key string `json:"key"` - Value string `json:"value"` - VariableType string `json:"variable_type"` + Key string `json:"key"` + Value string `json:"value"` + VariableType VariableTypeValue `json:"variable_type"` } // Pipeline represents a GitLab pipeline. 
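The self endpoints added to the personal access tokens service above let a token rotate or revoke itself without knowing its numeric ID, which suits automation that only holds the token string. A minimal sketch of self-rotation with an explicit expiry, under the same client assumptions as the earlier examples; the 90-day window is illustrative:

	package main

	import (
		"fmt"
		"log"
		"os"
		"time"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
		if err != nil {
			log.Fatal(err)
		}
		// Rotate the authenticated token itself, requesting a 90-day expiry
		// instead of the one-week default.
		expires := gitlab.ISOTime(time.Now().AddDate(0, 3, 0))
		pat, _, err := client.PersonalAccessTokens.RotatePersonalAccessTokenSelf(
			&gitlab.RotatePersonalAccessTokenOptions{ExpiresAt: &expires})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("store the replacement token:", pat.Token)
	}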
@@ -142,6 +142,7 @@ type PipelineInfo struct { Source string `json:"source"` Ref string `json:"ref"` SHA string `json:"sha"` + Name string `json:"name"` WebURL string `json:"web_url"` UpdatedAt *time.Time `json:"updated_at"` CreatedAt *time.Time `json:"created_at"` @@ -151,9 +152,11 @@ func (p PipelineInfo) String() string { return Stringify(p) } -// ListProjectPipelinesOptions represents the available ListProjectPipelines() options. +// ListProjectPipelinesOptions represents the available ListProjectPipelines() +// options. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines type ListProjectPipelinesOptions struct { ListOptions Scope *string `url:"scope,omitempty" json:"scope,omitempty"` @@ -172,7 +175,8 @@ type ListProjectPipelinesOptions struct { // ListProjectPipelines gets a list of project piplines. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines func (s *PipelinesService) ListProjectPipelines(pid interface{}, opt *ListProjectPipelinesOptions, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { project, err := parseID(pid) if err != nil { @@ -196,7 +200,8 @@ func (s *PipelinesService) ListProjectPipelines(pid interface{}, opt *ListProjec // GetPipeline gets a single project pipeline. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-single-pipeline +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#get-a-single-pipeline func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) { project, err := parseID(pid) if err != nil { @@ -220,7 +225,8 @@ func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options .. // GetPipelineVariables gets the variables of a single project pipeline. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-variables-of-a-pipeline +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#get-variables-of-a-pipeline func (s *PipelinesService) GetPipelineVariables(pid interface{}, pipeline int, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) { project, err := parseID(pid) if err != nil { @@ -244,7 +250,8 @@ func (s *PipelinesService) GetPipelineVariables(pid interface{}, pipeline int, o // GetPipelineTestReport gets the test report of a single project pipeline. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-pipelines-test-report +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#get-a-pipelines-test-report func (s *PipelinesService) GetPipelineTestReport(pid interface{}, pipeline int, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) { project, err := parseID(pid) if err != nil { @@ -268,14 +275,16 @@ func (s *PipelinesService) GetPipelineTestReport(pid interface{}, pipeline int, // GetLatestPipelineOptions represents the available GetLatestPipeline() options. // -// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline type GetLatestPipelineOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` } // GetLatestPipeline gets the latest pipeline for a specific ref in a project. 
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
 func (s *PipelinesService) GetLatestPipeline(pid interface{}, opt *GetLatestPipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -299,7 +308,8 @@ func (s *PipelinesService) GetLatestPipe
 
 // CreatePipelineOptions represents the available CreatePipeline() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
 type CreatePipelineOptions struct {
 	Ref *string `url:"ref" json:"ref"`
 	Variables *[]*PipelineVariableOptions `url:"variables,omitempty" json:"variables,omitempty"`
@@ -309,14 +319,15 @@ type CreatePipelineOptions struct {
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
 type PipelineVariableOptions struct {
-	Key *string `url:"key,omitempty" json:"key,omitempty"`
-	Value *string `url:"value,omitempty" json:"value,omitempty"`
-	VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+	Key *string `url:"key,omitempty" json:"key,omitempty"`
+	Value *string `url:"value,omitempty" json:"value,omitempty"`
+	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
 }
 
 // CreatePipeline creates a new project pipeline.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
 func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -338,7 +349,7 @@ func (s *PipelinesService) CreatePipelineOp
 	return p, resp, nil
 }
 
-// RetryPipelineBuild retries failed builds in a pipeline
+// RetryPipelineBuild retries failed builds in a pipeline.
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/pipelines.html#retry-jobs-in-a-pipeline
@@ -363,7 +374,7 @@ func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipeline int, opt
 	return p, resp, nil
 }
 
-// CancelPipelineBuild cancels a pipeline builds
+// CancelPipelineBuild cancels a pipeline's builds.
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/pipelines.html#cancel-a-pipelines-jobs
@@ -406,3 +417,38 @@ func (s *PipelinesService) DeletePipeline(pid interface{}, pipeline int, options
 	return s.client.Do(req, nil)
 }
+
+// UpdatePipelineMetadataOptions represents the available UpdatePipelineMetadata()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#update-pipeline-metadata
+type UpdatePipelineMetadataOptions struct {
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
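With the options struct above and the UpdatePipelineMetadata method that follows, a caller can rename a pipeline after the fact, which helps when many pipelines run on the same ref. A minimal sketch, under the same client assumptions as before; project ID, pipeline ID, and name are illustrative:

	package main

	import (
		"log"
		"os"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
		if err != nil {
			log.Fatal(err)
		}
		name := "nightly security scan"
		// Rename pipeline 1234 in project 42 so it is easy to find in the UI.
		_, _, err = client.Pipelines.UpdatePipelineMetadata(42, 1234,
			&gitlab.UpdatePipelineMetadataOptions{Name: &name})
		if err != nil {
			log.Fatal(err)
		}
	}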
+
+// UpdatePipelineMetadata updates the metadata of a pipeline. The metadata
+// contains the name of the pipeline.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#update-pipeline-metadata
+func (s *PipelinesService) UpdatePipelineMetadata(pid interface{}, pipeline int, opt *UpdatePipelineMetadataOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/metadata", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/plan_limits.go b/vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/plan_limits.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/plan_limits.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_access_tokens.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_access_tokens.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_badges.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_badges.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_badges.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_clusters.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_clusters.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_clusters.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_feature_flags.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_feature_flags.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_import_export.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_import_export.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_import_export.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_iterations.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_iterations.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_iterations.go
diff --git a/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_managed_licenses.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/project_managed_licenses.go
diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go
new file mode 100644
index 00000000..3eb7ebce
--- /dev/null
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_markdown_uploads.go
@@ -0,0 +1,211 @@
+//
+// Copyright 2024, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+)
+
+// ProjectMarkdownUploadsService handles communication with the project markdown uploads
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_markdown_uploads.html
+type ProjectMarkdownUploadsService struct {
+	client *Client
+}
+
+// ProjectMarkdownUploadedFile represents a single project markdown uploaded file.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_markdown_uploads.html
+type ProjectMarkdownUploadedFile struct {
+	ID int `json:"id"`
+	Alt string `json:"alt"`
+	URL string `json:"url"`
+	FullPath string `json:"full_path"`
+	Markdown string `json:"markdown"`
+}
+
+// ProjectMarkdownUpload represents a single project markdown upload.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_markdown_uploads.html
+type ProjectMarkdownUpload struct {
+	ID int `json:"id"`
+	Size int `json:"size"`
+	Filename string `json:"filename"`
+	CreatedAt *time.Time `json:"created_at"`
+	UploadedBy *User `json:"uploaded_by"`
+}
+
+// String gets a string representation of a ProjectMarkdownUpload.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_markdown_uploads.html
+func (m ProjectMarkdownUpload) String() string {
+	return Stringify(m)
+}
+
+// UploadProjectMarkdown uploads a markdown file to a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#upload-a-file
+func (s *ProjectMarkdownUploadsService) UploadProjectMarkdown(pid interface{}, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectMarkdownUploadedFile, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/uploads", PathEscape(project))
+
+	req, err := s.client.UploadRequest(
+		http.MethodPost,
+		u,
+		content,
+		filename,
+		UploadFile,
+		nil,
+		options,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	f := new(ProjectMarkdownUploadedFile)
+	resp, err := s.client.Do(req, f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f, resp, nil
+}
+
+// ListProjectMarkdownUploads gets all markdown uploads for a project.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#list-uploads
+func (s *ProjectMarkdownUploadsService) ListProjectMarkdownUploads(pid interface{}, options ...RequestOptionFunc) ([]*ProjectMarkdownUpload, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/uploads", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var uploads []*ProjectMarkdownUpload
+	resp, err := s.client.Do(req, &uploads)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return uploads, resp, nil
+}
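Uploads feed directly into markdown bodies: the returned Markdown field is ready to embed in an issue or merge request description. A minimal sketch, assuming the client exposes this service as client.ProjectMarkdownUploads (the field name is my assumption; the wiring is not shown in this hunk), with an illustrative attachment:

	package main

	import (
		"fmt"
		"log"
		"os"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
		if err != nil {
			log.Fatal(err)
		}
		file, err := os.Open("diagram.png") // illustrative attachment
		if err != nil {
			log.Fatal(err)
		}
		defer file.Close()
		// Upload to project 42 and reuse the rendered markdown snippet.
		up, _, err := client.ProjectMarkdownUploads.UploadProjectMarkdown(42, file, "diagram.png")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("embed with:", up.Markdown)
	}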
+
+// DownloadProjectMarkdownUploadByID downloads a specific upload by ID.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#download-an-uploaded-file-by-id
+func (s *ProjectMarkdownUploadsService) DownloadProjectMarkdownUploadByID(pid interface{}, uploadID int, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/uploads/%d", PathEscape(project), uploadID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var f bytes.Buffer
+	resp, err := s.client.Do(req, &f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f.Bytes(), resp, nil
+}
+
+// DownloadProjectMarkdownUploadBySecretAndFilename downloads a specific upload
+// by secret and filename.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#download-an-uploaded-file-by-secret-and-filename
+func (s *ProjectMarkdownUploadsService) DownloadProjectMarkdownUploadBySecretAndFilename(pid interface{}, secret string, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/uploads/%s/%s", PathEscape(project), PathEscape(secret), PathEscape(filename))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var f bytes.Buffer
+	resp, err := s.client.Do(req, &f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f.Bytes(), resp, nil
+}
+
+// DeleteProjectMarkdownUploadByID deletes an upload by ID.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#delete-an-uploaded-file-by-id
+func (s *ProjectMarkdownUploadsService) DeleteProjectMarkdownUploadByID(pid interface{}, uploadID int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/uploads/%d", PathEscape(project), uploadID)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteProjectMarkdownUploadBySecretAndFilename deletes an upload
+// by secret and filename.
+// +// GitLab API Docs: +// https://docs.gitlab.com/ee/api/project_markdown_uploads.html#delete-an-uploaded-file-by-secret-and-filename +func (s *ProjectMarkdownUploadsService) DeleteProjectMarkdownUploadBySecretAndFilename(pid interface{}, secret string, filename string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/uploads/%s/%s", + PathEscape(project), PathEscape(secret), PathEscape(filename)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go similarity index 91% rename from vendor/github.com/xanzy/go-gitlab/project_members.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_members.go index 37d4b8a2..c47a77ef 100644 --- a/vendor/github.com/xanzy/go-gitlab/project_members.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_members.go @@ -19,6 +19,7 @@ package gitlab import ( "fmt" "net/http" + "time" ) // ProjectMembersService handles communication with the project members @@ -29,6 +30,24 @@ type ProjectMembersService struct { client *Client } +// ProjectMember represents a project member. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html +type ProjectMember struct { + ID int `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + AccessLevel AccessLevelValue `json:"access_level"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + MemberRole *MemberRole `json:"member_role"` +} + // ListProjectMembersOptions represents the available ListProjectMembers() and // ListAllProjectMembers() options. 
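Because the members endpoints now return the richer ProjectMember shape defined above (including the custom MemberRole), a membership audit takes only a few lines. A minimal sketch using the service's existing ListAllProjectMembers method, which includes inherited members; the project ID is illustrative:

	package main

	import (
		"fmt"
		"log"
		"os"

		gitlab "gitlab.com/gitlab-org/api/client-go"
	)

	func main() {
		client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
		if err != nil {
			log.Fatal(err)
		}
		members, _, err := client.ProjectMembers.ListAllProjectMembers(42, &gitlab.ListProjectMembersOptions{})
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range members {
			// MemberRole stays nil unless a custom role is assigned.
			fmt.Printf("%s: access level %d, custom role %v\n", m.Username, m.AccessLevel, m.MemberRole)
		}
	}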
// diff --git a/vendor/github.com/xanzy/go-gitlab/project_mirror.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/project_mirror.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_mirror.go diff --git a/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_repository_storage_move.go diff --git a/vendor/github.com/xanzy/go-gitlab/project_snippets.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/project_snippets.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_snippets.go diff --git a/vendor/github.com/xanzy/go-gitlab/project_templates.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/project_templates.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_templates.go diff --git a/vendor/github.com/xanzy/go-gitlab/project_variables.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go similarity index 98% rename from vendor/github.com/xanzy/go-gitlab/project_variables.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go index e75c7463..9dea091b 100644 --- a/vendor/github.com/xanzy/go-gitlab/project_variables.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/project_variables.go @@ -41,6 +41,7 @@ type ProjectVariable struct { VariableType VariableTypeValue `json:"variable_type"` Protected bool `json:"protected"` Masked bool `json:"masked"` + Hidden bool `json:"hidden"` Raw bool `json:"raw"` EnvironmentScope string `json:"environment_scope"` Description string `json:"description"` @@ -132,6 +133,7 @@ type CreateProjectVariableOptions struct { Description *string `url:"description,omitempty" json:"description,omitempty"` EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + MaskedAndHidden *bool `url:"masked_and_hidden,omitempty" json:"masked_and_hidden,omitempty"` Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go b/vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go rename to vendor/gitlab.com/gitlab-org/api/client-go/project_vulnerabilities.go diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/gitlab.com/gitlab-org/api/client-go/projects.go similarity index 75% rename from vendor/github.com/xanzy/go-gitlab/projects.go rename to vendor/gitlab.com/gitlab-org/api/client-go/projects.go index 8edd9cc2..ae108d9f 100644 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/projects.go @@ -17,6 +17,7 @@ package gitlab import ( + "bytes" "encoding/json" "fmt" "io" @@ -41,7 +42,6 @@ type Project struct { ID int `json:"id"` Description string `json:"description"` DefaultBranch string 
`json:"default_branch"` - Public bool `json:"public"` Visibility VisibilityValue `json:"visibility"` SSHURLToRepo string `json:"ssh_url_to_repo"` HTTPURLToRepo string `json:"http_url_to_repo"` @@ -67,6 +67,7 @@ type Project struct { ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` LastActivityAt *time.Time `json:"last_activity_at,omitempty"` CreatorID int `json:"creator_id"` Namespace *ProjectNamespace `json:"namespace"` @@ -84,9 +85,11 @@ type Project struct { StarCount int `json:"star_count"` RunnersToken string `json:"runners_token"` AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` + AllowPipelineTriggerApproveDeployment bool `json:"allow_pipeline_trigger_approve_deployment"` OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` + PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` LFSEnabled bool `json:"lfs_enabled"` RepositoryStorage string `json:"repository_storage"` @@ -127,48 +130,50 @@ type Project struct { GroupFullPath string `json:"group_full_path"` GroupAccessLevel int `json:"group_access_level"` } `json:"shared_with_groups"` - Statistics *Statistics `json:"statistics"` - Links *Links `json:"_links,omitempty"` - ImportURL string `json:"import_url"` - ImportType string `json:"import_type"` - ImportStatus string `json:"import_status"` - ImportError string `json:"import_error"` - CIDefaultGitDepth int `json:"ci_default_git_depth"` - CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` - CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` - CISeperateCache bool `json:"ci_separated_caches"` - CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` - CIOptInJWT bool `json:"ci_opt_in_jwt"` - CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` - CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` - PublicJobs bool `json:"public_jobs"` - BuildTimeout int `json:"build_timeout"` - AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` - CIConfigPath string `json:"ci_config_path"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ComplianceFrameworks []string `json:"compliance_frameworks"` - BuildCoverageRegex string `json:"build_coverage_regex"` - IssuesTemplate string `json:"issues_template"` - MergeRequestsTemplate string `json:"merge_requests_template"` - IssueBranchTemplate string `json:"issue_branch_template"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - MergePipelinesEnabled bool `json:"merge_pipelines_enabled"` - MergeTrainsEnabled bool `json:"merge_trains_enabled"` - RestrictUserDefinedVariables bool `json:"restrict_user_defined_variables"` - MergeCommitTemplate string `json:"merge_commit_template"` - SquashCommitTemplate string `json:"squash_commit_template"` - AutoDevopsDeployStrategy string `json:"auto_devops_deploy_strategy"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - BuildGitStrategy string 
`json:"build_git_strategy"` - EmailsEnabled bool `json:"emails_enabled"` - ExternalAuthorizationClassificationLabel string `json:"external_authorization_classification_label"` - RequirementsEnabled bool `json:"requirements_enabled"` - RequirementsAccessLevel AccessControlValue `json:"requirements_access_level"` - SecurityAndComplianceEnabled bool `json:"security_and_compliance_enabled"` - SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` - MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` - ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` - ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` + Statistics *Statistics `json:"statistics"` + Links *Links `json:"_links,omitempty"` + ImportURL string `json:"import_url"` + ImportType string `json:"import_type"` + ImportStatus string `json:"import_status"` + ImportError string `json:"import_error"` + CIDefaultGitDepth int `json:"ci_default_git_depth"` + CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` + CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` + CISeperateCache bool `json:"ci_separated_caches"` + CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` + CIOptInJWT bool `json:"ci_opt_in_jwt"` + CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` + CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` + PublicJobs bool `json:"public_jobs"` + BuildTimeout int `json:"build_timeout"` + AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` + CIConfigPath string `json:"ci_config_path"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ComplianceFrameworks []string `json:"compliance_frameworks"` + BuildCoverageRegex string `json:"build_coverage_regex"` + IssuesTemplate string `json:"issues_template"` + MergeRequestsTemplate string `json:"merge_requests_template"` + IssueBranchTemplate string `json:"issue_branch_template"` + KeepLatestArtifact bool `json:"keep_latest_artifact"` + MergePipelinesEnabled bool `json:"merge_pipelines_enabled"` + MergeTrainsEnabled bool `json:"merge_trains_enabled"` + RestrictUserDefinedVariables bool `json:"restrict_user_defined_variables"` + CIPipelineVariablesMinimumOverrideRole CIPipelineVariablesMinimumOverrideRoleValue `json:"ci_pipeline_variables_minimum_override_role"` + MergeCommitTemplate string `json:"merge_commit_template"` + SquashCommitTemplate string `json:"squash_commit_template"` + AutoDevopsDeployStrategy string `json:"auto_devops_deploy_strategy"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + BuildGitStrategy string `json:"build_git_strategy"` + EmailsEnabled bool `json:"emails_enabled"` + ExternalAuthorizationClassificationLabel string `json:"external_authorization_classification_label"` + RequirementsEnabled bool `json:"requirements_enabled"` + RequirementsAccessLevel AccessControlValue `json:"requirements_access_level"` + SecurityAndComplianceEnabled bool `json:"security_and_compliance_enabled"` + SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` + MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` + ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` + ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` + 
PreReceiveSecretDetectionEnabled bool `json:"pre_receive_secret_detection_enabled"` // Deprecated: Use EmailsEnabled instead EmailsDisabled bool `json:"emails_disabled"` @@ -293,6 +298,7 @@ type Statistics struct { PackagesSize int64 `json:"packages_size"` SnippetsSize int64 `json:"snippets_size"` UploadsSize int64 `json:"uploads_size"` + ContainerRegistrySize int64 `json:"container_registry_size"` } func (s Project) String() string { @@ -307,6 +313,7 @@ type ProjectApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` Users []*BasicUser `json:"users"` @@ -829,87 +836,90 @@ func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUs // // GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project type EditProjectOptions struct { - AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` - OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` - AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` - AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"` - AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"` - Avatar *ProjectAvatar `url:"-" json:"avatar,omitempty"` - BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` - BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` - BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` - BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` - CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` - CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` - CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` - CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"` - CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` - CIRestrictPipelineCancellationRole *AccessControlValue `url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"` - ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` - ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"` - 
DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"`
-	Description *string `url:"description,omitempty" json:"description,omitempty"`
-	EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
-	EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"`
-	ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"`
-	ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"`
-	ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"`
-	IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"`
-	IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"`
-	IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"`
-	KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"`
-	LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
-	MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"`
-	MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"`
-	MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"`
-	MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"`
-	MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"`
-	MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"`
-	MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"`
-	Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"`
-	MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"`
-	MirrorOverwritesDivergedBranches *bool `url:"mirror_overwrites_diverged_branches,omitempty" json:"mirror_overwrites_diverged_branches,omitempty"`
-	MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"`
-	MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"`
-	ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"`
-	ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"`
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"`
-	OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"`
-	OnlyMirrorProtectedBranches *bool `url:"only_mirror_protected_branches,omitempty" json:"only_mirror_protected_branches,omitempty"`
-	OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"`
-	PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"`
-	PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"`
-	Path *string `url:"path,omitempty" json:"path,omitempty"`
-	PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"`
-	ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"`
-	EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"`
-	FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"`
-	InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"`
-	MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"`
-	RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"`
-	PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"`
-	RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"`
-	RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"`
-	RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
-	RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"`
-	ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"`
-	RestrictUserDefinedVariables *bool `url:"restrict_user_defined_variables,omitempty" json:"restrict_user_defined_variables,omitempty"`
-	SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"`
-	ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"`
-	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
-	GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"`
-	ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"`
-	SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"`
-	SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"`
-	SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"`
-	SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"`
-	Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"`
-	Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
-	WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
+	AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"`
+	AllowPipelineTriggerApproveDeployment *bool `url:"allow_pipeline_trigger_approve_deployment,omitempty" json:"allow_pipeline_trigger_approve_deployment,omitempty"`
+	OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"`
+	AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"`
+	ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"`
+	AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"`
+	AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"`
+	AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
+	AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"`
+	Avatar *ProjectAvatar `url:"-" json:"avatar,omitempty"`
+	BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"`
+	BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"`
+	BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"`
+	BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"`
+	CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"`
+	CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"`
+	CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"`
+	CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"`
+	CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"`
+	CIRestrictPipelineCancellationRole *AccessControlValue `url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"`
+	CIPipelineVariablesMinimumOverrideRole *CIPipelineVariablesMinimumOverrideRoleValue `url:"ci_pipeline_variables_minimum_override_role,omitempty" json:"ci_pipeline_variables_minimum_override_role,omitempty"`
+	ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"`
+	ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"`
+	DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
+	EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"`
+	ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"`
+	ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"`
+	ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"`
+	IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"`
+	IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"`
+	IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"`
+	KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"`
+	LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
+	MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"`
+	MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"`
+	MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"`
+	MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"`
+	MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"`
+	MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"`
+	MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"`
+	Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"`
+	MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"`
+	MirrorOverwritesDivergedBranches *bool `url:"mirror_overwrites_diverged_branches,omitempty" json:"mirror_overwrites_diverged_branches,omitempty"`
+	MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"`
+	MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"`
+	ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"`
+	ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"`
+	OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"`
+	OnlyMirrorProtectedBranches *bool `url:"only_mirror_protected_branches,omitempty" json:"only_mirror_protected_branches,omitempty"`
+	OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"`
+	PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"`
+	PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"`
+	Path *string `url:"path,omitempty" json:"path,omitempty"`
+	PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"`
+	ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"`
+	EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"`
+	FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"`
+	InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"`
+	MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"`
+	RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"`
+	PreventMergeWithoutJiraIssue *bool `url:"prevent_merge_without_jira_issue,omitempty" json:"prevent_merge_without_jira_issue,omitempty"`
+	PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"`
+	RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"`
+	RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"`
+	RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
+	RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"`
+	ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"`
+	RestrictUserDefinedVariables *bool `url:"restrict_user_defined_variables,omitempty" json:"restrict_user_defined_variables,omitempty"`
+	SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"`
+	ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"`
+	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
+	GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"`
+	ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"`
+	SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"`
+	SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"`
+	SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"`
+	SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"`
+	Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"`
+	Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
+	WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
 
 	// Deprecated: Use ContainerRegistryAccessLevel instead.
 	ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
@@ -1039,6 +1049,44 @@ func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionF
 	return p, resp, nil
 }
 
+// ListProjectInvidedGroupOptions represents the available
+// ListProjectsInvitedGroups() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
+type ListProjectInvidedGroupOptions struct {
+	ListOptions
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
+	Relation *[]string `url:"relation,omitempty" json:"relation,omitempty"`
+	WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
+}
+
+// ListProjectsInvitedGroups lists invited groups of a project
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
+func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pg []*ProjectGroup
+	resp, err := s.client.Do(req, &pg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pg, resp, nil
+}
+
 // UnstarProject unstars a given project.
 //
 // GitLab API docs:
@@ -1116,18 +1164,28 @@ func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...RequestOp
 	return p, resp, nil
 }
 
+// DeleteProjectOptions represents the available DeleteProject() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#delete-project
+type DeleteProjectOptions struct {
+	FullPath *string `url:"full_path" json:"full_path"`
+	PermanentlyRemove *bool `url:"permanently_remove" json:"permanently_remove"`
+}
+
 // DeleteProject removes a project including all associated resources
 // (issues, merge requests etc.)
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-project
-func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#delete-project
+func (s *ProjectsService) DeleteProject(pid interface{}, opt *DeleteProjectOptions, options ...RequestOptionFunc) (*Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
 		return nil, err
 	}
 	u := fmt.Sprintf("projects/%s", PathEscape(project))
 
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
 	if err != nil {
 		return nil, err
 	}
@@ -1135,7 +1193,7 @@ func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptio
 	return s.client.Do(req, nil)
 }
 
-// ShareWithGroupOptions represents options to share project with groups
+// ShareWithGroupOptions represents the available SharedWithGroup() options.
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group
 type ShareWithGroupOptions struct {
@@ -1180,21 +1238,13 @@ func (s *ProjectsService) DeleteSharedProjectFromGroup(pid interface{}, groupID
 	return s.client.Do(req, nil)
 }
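As a minimal usage sketch of the two-step deletion enabled by the new DeleteProjectOptions above (the client variable, token handling, and project path are illustrative; gitlab.Ptr is the pointer helper shipped with recent client-go releases):

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// deleteProjectPermanently first marks a project for deletion, then removes it
// for good; GitLab only honors permanent removal when both FullPath and
// PermanentlyRemove are set and the project is already marked.
func deleteProjectPermanently(git *gitlab.Client, path string) error {
	// Soft-delete (mark for deletion); passing nil options keeps the old behavior.
	if _, err := git.Projects.DeleteProject(path, nil); err != nil {
		return err
	}
	// Permanently remove the already-marked project.
	_, err := git.Projects.DeleteProject(path, &gitlab.DeleteProjectOptions{
		FullPath:          gitlab.Ptr(path),
		PermanentlyRemove: gitlab.Ptr(true),
	})
	return err
}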
-// ProjectMember represents a project member.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
-type ProjectMember struct {
-	ID int `json:"id"`
-	Username string `json:"username"`
-	Email string `json:"email"`
-	Name string `json:"name"`
-	State string `json:"state"`
-	CreatedAt *time.Time `json:"created_at"`
-	ExpiresAt *ISOTime `json:"expires_at"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-	WebURL string `json:"web_url"`
-	AvatarURL string `json:"avatar_url"`
+// HookCustomHeader represents a project or group hook custom header
+// Note: "Key" is returned from the Get operation, but "Value" is not
+// The List operation doesn't return any headers at all for Projects,
+// but does return headers for Groups
+type HookCustomHeader struct {
+	Key string `json:"key"`
+	Value string `json:"value"`
 }
 
 // ProjectHook represents a project hook.
@@ -1202,25 +1252,30 @@ type ProjectMember struct {
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/projects.html#list-project-hooks
 type ProjectHook struct {
-	ID int `json:"id"`
-	URL string `json:"url"`
-	ConfidentialNoteEvents bool `json:"confidential_note_events"`
-	ProjectID int `json:"project_id"`
-	PushEvents bool `json:"push_events"`
-	PushEventsBranchFilter string `json:"push_events_branch_filter"`
-	IssuesEvents bool `json:"issues_events"`
-	ConfidentialIssuesEvents bool `json:"confidential_issues_events"`
-	MergeRequestsEvents bool `json:"merge_requests_events"`
-	TagPushEvents bool `json:"tag_push_events"`
-	NoteEvents bool `json:"note_events"`
-	JobEvents bool `json:"job_events"`
-	PipelineEvents bool `json:"pipeline_events"`
-	WikiPageEvents bool `json:"wiki_page_events"`
-	DeploymentEvents bool `json:"deployment_events"`
-	ReleasesEvents bool `json:"releases_events"`
-	EnableSSLVerification bool `json:"enable_ssl_verification"`
-	CreatedAt *time.Time `json:"created_at"`
-	CustomWebhookTemplate string `json:"custom_webhook_template"`
+	ID int `json:"id"`
+	URL string `json:"url"`
+	Name string `json:"name"`
+	Description string `json:"description"`
+	ConfidentialNoteEvents bool `json:"confidential_note_events"`
+	ProjectID int `json:"project_id"`
+	PushEvents bool `json:"push_events"`
+	PushEventsBranchFilter string `json:"push_events_branch_filter"`
+	IssuesEvents bool `json:"issues_events"`
+	ConfidentialIssuesEvents bool `json:"confidential_issues_events"`
+	MergeRequestsEvents bool `json:"merge_requests_events"`
+	TagPushEvents bool `json:"tag_push_events"`
+	NoteEvents bool `json:"note_events"`
+	JobEvents bool `json:"job_events"`
+	PipelineEvents bool `json:"pipeline_events"`
+	WikiPageEvents bool `json:"wiki_page_events"`
+	DeploymentEvents bool `json:"deployment_events"`
+	ReleasesEvents bool `json:"releases_events"`
+	EnableSSLVerification bool `json:"enable_ssl_verification"`
+	AlertStatus string `json:"alert_status"`
+	CreatedAt *time.Time `json:"created_at"`
+	ResourceAccessTokenEvents bool `json:"resource_access_token_events"`
+	CustomWebhookTemplate string `json:"custom_webhook_template"`
+	CustomHeaders []*HookCustomHeader `json:"custom_headers"`
 }
 
 // ListProjectHooksOptions represents the available ListProjectHooks() options.
@@ -1283,23 +1338,27 @@ func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...R
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/projects.html#add-project-hook
 type AddProjectHookOptions struct {
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
-	ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-	CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
+	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
+	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
+	PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
+	ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"`
+	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	Token *string `url:"token,omitempty" json:"token,omitempty"`
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
+	CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
 }
 
 // AddProjectHook adds a hook to a specified project.
@@ -1332,23 +1391,27 @@ func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOpt
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/projects.html#edit-project-hook
 type EditProjectHookOptions struct {
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
-	ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-	CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
+	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
+	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
+	PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
+	ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"`
+	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	Token *string `url:"token,omitempty" json:"token,omitempty"`
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
+	CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
 }
 
 // EditProjectHook edits a hook for a specified project.
@@ -1396,6 +1459,80 @@ func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options .
 	return s.client.Do(req, nil)
 }
 
+// TriggerTestProjectHook triggers a test hook for a specified project.
+//
+// In GitLab 17.0 and later, this endpoint has a special rate limit.
+// In GitLab 17.0 the rate was three requests per minute for each project hook.
+// In GitLab 17.1 this was changed to five requests per minute for each project
+// and authenticated user.
+//
+// To disable this limit on self-managed GitLab and GitLab Dedicated,
+// an administrator can disable the feature flag named web_hook_test_api_endpoint_rate_limit.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#trigger-a-test-project-hook
+func (s *ProjectsService) TriggerTestProjectHook(pid interface{}, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/hooks/%d/test/%s", PathEscape(project), hook, string(event))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SetHookCustomHeaderOptions represents the available SetProjectCustomHeader()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header
+type SetHookCustomHeaderOptions struct {
+	Value *string `json:"value,omitempty"`
+}
+
+// SetProjectCustomHeader creates or updates a project custom webhook header.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header
+func (s *ProjectsService) SetProjectCustomHeader(pid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteProjectCustomHeader deletes a project custom webhook header.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#delete-a-custom-header
+func (s *ProjectsService) DeleteProjectCustomHeader(pid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
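A short sketch of driving the custom-header endpoints added above; the hook ID, header name, and value are made up for illustration, and gitlab.Ptr is the pointer helper from recent client-go releases:

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// rotateHookHeader sets (or replaces) a custom header on a project webhook,
// then removes it again. Note the API never returns the stored value.
func rotateHookHeader(git *gitlab.Client, pid interface{}, hook int) error {
	_, err := git.Projects.SetProjectCustomHeader(pid, hook, "X-Signature-Key",
		&gitlab.SetHookCustomHeaderOptions{Value: gitlab.Ptr("s3cr3t")})
	if err != nil {
		return err
	}
	_, err = git.Projects.DeleteProjectCustomHeader(pid, hook, "X-Signature-Key")
	return err
}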
 // ProjectForkRelation represents a project fork relationship.
 //
 // GitLab API docs:
@@ -1459,12 +1596,17 @@ func (s *ProjectsService) DeleteProjectForkRelation(pid interface{}, options ...
 type ProjectFile struct {
 	Alt string `json:"alt"`
 	URL string `json:"url"`
+	FullPath string `json:"full_path"`
 	Markdown string `json:"markdown"`
 }
 
 // UploadFile uploads a file.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#upload-a-file
+// Deprecated: UploadFile is deprecated and will be removed in a future release.
+// Use [ProjectMarkdownUploadsService.UploadProjectMarkdown] instead for uploading
+// markdown files to a project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_markdown_uploads.html#upload-a-file
 func (s *ProjectsService) UploadFile(pid interface{}, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectFile, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -1527,6 +1669,31 @@ func (s *ProjectsService) UploadAvatar(pid interface{}, avatar io.Reader, filena
 	return p, resp, nil
 }
 
+// DownloadAvatar downloads an avatar.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#download-a-project-avatar
+func (s *ProjectsService) DownloadAvatar(pid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/avatar", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	avatar := new(bytes.Buffer)
+	resp, err := s.client.Do(req, avatar)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(avatar.Bytes()), resp, err
+}
+
 // ListProjectForks gets a list of project forks.
 //
 // GitLab API docs:
@@ -1570,7 +1737,9 @@ type ProjectPushRules struct {
 	FileNameRegex string `json:"file_name_regex"`
 	MaxFileSize int `json:"max_file_size"`
 	CommitCommitterCheck bool `json:"commit_committer_check"`
+	CommitCommitterNameCheck bool `json:"commit_committer_name_check"`
 	RejectUnsignedCommits bool `json:"reject_unsigned_commits"`
+	RejectNonDCOCommits bool `json:"reject_non_dco_commits"`
 }
 
 // GetProjectPushRules gets the push rules of a project.
@@ -1607,6 +1776,7 @@ type AddProjectPushRuleOptions struct {
 	AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"`
 	BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"`
 	CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"`
+	CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"`
 	CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"`
 	CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"`
 	DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"`
@@ -1615,6 +1785,7 @@ type AddProjectPushRuleOptions struct {
 	MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"`
 	PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"`
 	RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"`
+	RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"`
 }
 
 // AddProjectPushRule adds a push rule to a specified project.
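To illustrate the two push-rule fields introduced above, a hedged sketch (the function name and enabled checks are illustrative; the fields map one-to-one onto the commit_committer_name_check and reject_non_dco_commits API parameters):

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// enableStrictPushRules turns on committer-name verification and DCO sign-off
// enforcement for a project.
func enableStrictPushRules(git *gitlab.Client, pid interface{}) error {
	_, _, err := git.Projects.AddProjectPushRule(pid, &gitlab.AddProjectPushRuleOptions{
		CommitCommitterNameCheck: gitlab.Ptr(true),
		RejectNonDCOCommits:      gitlab.Ptr(true),
	})
	return err
}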
@@ -1651,6 +1822,7 @@ type EditProjectPushRuleOptions struct {
 	AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"`
 	BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"`
 	CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"`
+	CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"`
 	CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"`
 	CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"`
 	DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"`
@@ -1659,6 +1831,7 @@ type EditProjectPushRuleOptions struct {
 	MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"`
 	PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"`
 	RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"`
+	RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"`
 }
 
 // EditProjectPushRule edits a push rule for a specified project.
@@ -1788,9 +1961,11 @@ func (s *ProjectsService) ChangeApprovalConfiguration(pid interface{}, opt *Chan
 	return pa, resp, nil
 }
 
-// GetProjectApprovalRulesListsOptions represents the available GetProjectApprovalRules() options.
+// GetProjectApprovalRulesListsOptions represents the available
+// GetProjectApprovalRules() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules
 type GetProjectApprovalRulesListsOptions ListOptions
 
 // GetProjectApprovalRules looks up the list of project level approver rules.
@@ -2043,7 +2218,8 @@ func (s *ProjectsService) StartMirroringProject(pid interface{}, options ...Requ
 // TransferProjectOptions represents the available TransferProject() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
 type TransferProjectOptions struct {
 	Namespace interface{} `url:"namespace,omitempty" json:"namespace,omitempty"`
 }
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_branches.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go
similarity index 99%
rename from vendor/github.com/xanzy/go-gitlab/protected_branches.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go
index d13f57a6..e88c7aea 100644
--- a/vendor/github.com/xanzy/go-gitlab/protected_branches.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/protected_branches.go
@@ -54,6 +54,7 @@ type BranchAccessDescription struct {
 	ID int `json:"id"`
 	AccessLevel AccessLevelValue `json:"access_level"`
 	AccessLevelDescription string `json:"access_level_description"`
+	DeployKeyID int `json:"deploy_key_id"`
 	UserID int `json:"user_id"`
 	GroupID int `json:"group_id"`
 }
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_environments.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/protected_environments.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/protected_environments.go
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_tags.go b/vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/protected_tags.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/protected_tags.go
diff --git a/vendor/github.com/xanzy/go-gitlab/releaselinks.go b/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go
similarity index 86%
rename from vendor/github.com/xanzy/go-gitlab/releaselinks.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go
index f7dd05f1..8cde15f5 100644
--- a/vendor/github.com/xanzy/go-gitlab/releaselinks.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/releaselinks.go
@@ -101,10 +101,11 @@ func (s *ReleaseLinksService) GetReleaseLink(pid interface{}, tagName string, li
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
 type CreateReleaseLinkOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
+	DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
+	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
 }
 
 // CreateReleaseLink creates a link.
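A minimal sketch of attaching a release asset with the new DirectAssetPath field (the tag, URL, and permanent path are illustrative; gitlab.PackageLinkType is one of the existing LinkTypeValue constants):

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// addReleaseAsset links a downloadable artifact to an existing release and
// exposes it under a stable /binaries/... path on the release page.
func addReleaseAsset(git *gitlab.Client, pid interface{}) error {
	_, _, err := git.ReleaseLinks.CreateReleaseLink(pid, "v1.0.0", &gitlab.CreateReleaseLinkOptions{
		Name:            gitlab.Ptr("linux-amd64"),
		URL:             gitlab.Ptr("https://example.com/downloads/linux-amd64"),
		DirectAssetPath: gitlab.Ptr("/binaries/linux-amd64"),
		LinkType:        gitlab.Ptr(gitlab.PackageLinkType),
	})
	return err
}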
@@ -137,10 +138,11 @@ func (s *ReleaseLinksService) CreateReleaseLink(pid interface{}, tagName string,
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link
 type UpdateReleaseLinkOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
+	DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
+	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
 }
 
 // UpdateReleaseLink updates an asset link.
diff --git a/vendor/github.com/xanzy/go-gitlab/releases.go b/vendor/gitlab.com/gitlab-org/api/client-go/releases.go
similarity index 78%
rename from vendor/github.com/xanzy/go-gitlab/releases.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/releases.go
index 3d6a8977..26e37cb4 100644
--- a/vendor/github.com/xanzy/go-gitlab/releases.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/releases.go
@@ -49,19 +49,22 @@ type Release struct {
 		AvatarURL string `json:"avatar_url"`
 		WebURL string `json:"web_url"`
 	} `json:"author"`
-	Commit Commit `json:"commit"`
-	UpcomingRelease bool `json:"upcoming_release"`
-	CommitPath string `json:"commit_path"`
-	TagPath string `json:"tag_path"`
+	Commit Commit `json:"commit"`
+	Milestones []*ReleaseMilestone `json:"milestones"`
+	UpcomingRelease bool `json:"upcoming_release"`
+	CommitPath string `json:"commit_path"`
+	TagPath string `json:"tag_path"`
 	Assets struct {
 		Count int `json:"count"`
 		Sources []struct {
 			Format string `json:"format"`
 			URL string `json:"url"`
 		} `json:"sources"`
-		Links []*ReleaseLink `json:"links"`
+		Links []*ReleaseLink `json:"links"`
+		EvidenceFilePath string `json:"evidence_file_path"`
 	} `json:"assets"`
-	Links struct {
+	Evidences []*ReleaseEvidence `json:"evidences"`
+	Links struct {
 		ClosedIssueURL string `json:"closed_issues_url"`
 		ClosedMergeRequest string `json:"closed_merge_requests_url"`
 		EditURL string `json:"edit_url"`
@@ -72,6 +75,45 @@ type Release struct {
 	} `json:"_links"`
 }
 
+// ReleaseMilestone represents a project release milestone.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
+type ReleaseMilestone struct {
+	ID int `json:"id"`
+	IID int `json:"iid"`
+	ProjectID int `json:"project_id"`
+	Title string `json:"title"`
+	Description string `json:"description"`
+	State string `json:"state"`
+	CreatedAt *time.Time `json:"created_at"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	DueDate *ISOTime `json:"due_date"`
+	StartDate *ISOTime `json:"start_date"`
+	WebURL string `json:"web_url"`
+	IssueStats *ReleaseMilestoneIssueStats `json:"issue_stats"`
+}
+
+// ReleaseMilestoneIssueStats represents a project release milestone's
+// related issues statistics.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
+type ReleaseMilestoneIssueStats struct {
+	Total int `json:"total"`
+	Closed int `json:"closed"`
+}
+
+// ReleaseEvidence represents a project release's evidence.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
+type ReleaseEvidence struct {
+	SHA string `json:"sha"`
+	Filepath string `json:"filepath"`
+	CollectedAt *time.Time `json:"collected_at"`
+}
+
 // ListReleasesOptions represents ListReleases() options.
 //
 // GitLab API docs:
@@ -187,10 +229,11 @@ type ReleaseAssetsOptions struct {
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/releases/index.html#create-a-release
 type ReleaseAssetLinkOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
+	DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
+	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
 }
 
 // CreateRelease creates a release.
diff --git a/vendor/github.com/xanzy/go-gitlab/repositories.go b/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go
similarity index 99%
rename from vendor/github.com/xanzy/go-gitlab/repositories.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/repositories.go
index dde87617..3c59f9ab 100644
--- a/vendor/github.com/xanzy/go-gitlab/repositories.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/repositories.go
@@ -209,6 +209,7 @@ type Compare struct {
 	Diffs []*Diff `json:"diffs"`
 	CompareTimeout bool `json:"compare_timeout"`
 	CompareSameRef bool `json:"compare_same_ref"`
+	WebURL string `json:"web_url"`
 }
 
 func (c Compare) String() string {
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go
similarity index 99%
rename from vendor/github.com/xanzy/go-gitlab/repository_files.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go
index 2f58b39a..7ffaa93b 100644
--- a/vendor/github.com/xanzy/go-gitlab/repository_files.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/repository_files.go
@@ -215,6 +215,7 @@ func (s *RepositoryFilesService) GetFileBlame(pid interface{}, file string, opt
 // https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository
 type GetRawFileOptions struct {
 	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+	LFS *bool `url:"lfs,omitempty" json:"lfs,omitempty"`
 }
 
 // GetRawFile allows you to receive the raw file in repository.
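A hedged sketch of the new LFS flag on GetRawFileOptions (the repository path and ref are illustrative); when LFS is set, GitLab resolves the LFS pointer and returns the stored object instead of the pointer file:

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// fetchLFSObject retrieves a file's content at a ref, following the LFS
// pointer to the stored object.
func fetchLFSObject(git *gitlab.Client, pid interface{}) ([]byte, error) {
	raw, _, err := git.RepositoryFiles.GetRawFile(pid, "data/model.bin", &gitlab.GetRawFileOptions{
		Ref: gitlab.Ptr("main"),
		LFS: gitlab.Ptr(true),
	})
	return raw, err
}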
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_submodules.go b/vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/repository_submodules.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/repository_submodules.go
diff --git a/vendor/github.com/xanzy/go-gitlab/request_options.go b/vendor/gitlab.com/gitlab-org/api/client-go/request_options.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/request_options.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/request_options.go
diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go
new file mode 100644
index 00000000..b11cd8be
--- /dev/null
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_group.go
@@ -0,0 +1,165 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ResourceGroupService handles communication with the resource
+// group related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+type ResourceGroupService struct {
+	client *Client
+}
+
+// ResourceGroup represents a GitLab Project Resource Group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+type ResourceGroup struct {
+	ID int `json:"id"`
+	Key string `json:"key"`
+	ProcessMode string `json:"process_mode"`
+	CreatedAt *time.Time `json:"created_at"`
+	UpdatedAt *time.Time `json:"updated_at"`
+}
+
+// Gets a string representation of a ResourceGroup
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+func (rg ResourceGroup) String() string {
+	return Stringify(rg)
+}
+
+// GetAllResourceGroupsForAProject allows you to get all resource
+// groups associated with a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#get-all-resource-groups-for-a-project
+func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid interface{}, options ...RequestOptionFunc) ([]*ResourceGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/resource_groups", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rgs []*ResourceGroup
+	resp, err := s.client.Do(req, &rgs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rgs, resp, nil
+}
+
+// GetASpecificResourceGroup allows you to get a specific
+// resource group for a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#get-a-specific-resource-group
+func (s *ResourceGroupService) GetASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rg := new(ResourceGroup)
+	resp, err := s.client.Do(req, rg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rg, resp, nil
+}
+
+// ListUpcomingJobsForASpecificResourceGroup allows you to get all
+// upcoming jobs for a specific resource group for a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#list-upcoming-jobs-for-a-specific-resource-group
+func (s *ResourceGroupService) ListUpcomingJobsForASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) ([]*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/resource_groups/%s/upcoming_jobs", PathEscape(project), key)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var js []*Job
+	resp, err := s.client.Do(req, &js)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return js, resp, nil
+}
+
+// EditAnExistingResourceGroupOptions represents the available
+// EditAnExistingResourceGroup options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group
+type EditAnExistingResourceGroupOptions struct {
+	ProcessMode *ResourceGroupProcessMode `url:"process_mode,omitempty" json:"process_mode,omitempty"`
+}
+
+// EditAnExistingResourceGroup allows you to edit a specific
+// resource group for a given project
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group
+func (s *ResourceGroupService) EditAnExistingResourceGroup(pid interface{}, key string, opts *EditAnExistingResourceGroupOptions, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rg := new(ResourceGroup)
+	resp, err := s.client.Do(req, rg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rg, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go
similarity index 98%
rename from vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go
index 7ad354a7..142cb9e6 100644
--- a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/resource_iteration_events.go
@@ -50,7 +50,7 @@ type Iteration struct {
 	ID int `json:"id"`
 	IID int `json:"iid"`
 	Sequence int `json:"sequence"`
-	GroupId int `json:"group_id"`
+	GroupID int `json:"group_id"`
 	Title string `json:"title"`
 	Description string `json:"description"`
 	State int `json:"state"`
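A usage sketch for the new resource-group service (assuming it is exposed on the client as ResourceGroup, and that OldestFirst is one of the ResourceGroupProcessMode constants; the "production" key is illustrative):

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// serializeDeploys switches a resource group to oldest-first processing so
// jobs in the group run one at a time, in pipeline order.
func serializeDeploys(git *gitlab.Client, pid interface{}) (*gitlab.ResourceGroup, error) {
	rg, _, err := git.ResourceGroup.EditAnExistingResourceGroup(pid, "production",
		&gitlab.EditAnExistingResourceGroupOptions{
			ProcessMode: gitlab.Ptr(gitlab.OldestFirst),
		})
	return rg, err
}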
a/vendor/github.com/xanzy/go-gitlab/resource_label_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/resource_label_events.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/resource_label_events.go
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/resource_milestone_events.go
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_state_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/resource_state_events.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/resource_state_events.go
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go b/vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/resource_weight_events.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/resource_weight_events.go
diff --git a/vendor/github.com/xanzy/go-gitlab/runners.go b/vendor/gitlab.com/gitlab-org/api/client-go/runners.go
similarity index 93%
rename from vendor/github.com/xanzy/go-gitlab/runners.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/runners.go
index 5224cf91..8c255f79 100644
--- a/vendor/github.com/xanzy/go-gitlab/runners.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/runners.go
@@ -52,19 +52,20 @@ type Runner struct {
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/runners.html
 type RunnerDetails struct {
-	Paused bool `json:"paused"`
-	Architecture string `json:"architecture"`
-	Description string `json:"description"`
-	ID int `json:"id"`
-	IPAddress string `json:"ip_address"`
-	IsShared bool `json:"is_shared"`
-	RunnerType string `json:"runner_type"`
-	ContactedAt *time.Time `json:"contacted_at"`
-	Name string `json:"name"`
-	Online bool `json:"online"`
-	Status string `json:"status"`
-	Platform string `json:"platform"`
-	Projects []struct {
+	Paused bool `json:"paused"`
+	Architecture string `json:"architecture"`
+	Description string `json:"description"`
+	ID int `json:"id"`
+	IPAddress string `json:"ip_address"`
+	IsShared bool `json:"is_shared"`
+	RunnerType string `json:"runner_type"`
+	ContactedAt *time.Time `json:"contacted_at"`
+	MaintenanceNote string `json:"maintenance_note"`
+	Name string `json:"name"`
+	Online bool `json:"online"`
+	Status string `json:"status"`
+	Platform string `json:"platform"`
+	Projects []struct {
 		ID int `json:"id"`
 		Name string `json:"name"`
 		NameWithNamespace string `json:"name_with_namespace"`
@@ -173,13 +174,14 @@ func (s *RunnersService) GetRunnerDetails(rid interface{}, options ...RequestOpt
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/runners.html#update-runners-details
 type UpdateRunnerDetailsOptions struct {
-	Description *string `url:"description,omitempty" json:"description,omitempty"`
-	Paused *bool `url:"paused,omitempty" json:"paused,omitempty"`
-	TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"`
-	RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
-	Locked *bool `url:"locked,omitempty" json:"locked,omitempty"`
-	AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"`
-	MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Paused *bool `url:"paused,omitempty" json:"paused,omitempty"`
+	TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"`
+	RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
+	Locked *bool `url:"locked,omitempty" json:"locked,omitempty"`
+	AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"`
+	MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
+	MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"`
 
 	// Deprecated: Use Paused instead. (Deprecated in GitLab 14.8)
 	Active *bool `url:"active,omitempty" json:"active,omitempty"`
diff --git a/vendor/github.com/xanzy/go-gitlab/search.go b/vendor/gitlab.com/gitlab-org/api/client-go/search.go
similarity index 100%
rename from vendor/github.com/xanzy/go-gitlab/search.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/search.go
diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/gitlab.com/gitlab-org/api/client-go/services.go
similarity index 73%
rename from vendor/github.com/xanzy/go-gitlab/services.go
rename to vendor/gitlab.com/gitlab-org/api/client-go/services.go
index 5be4a4f5..c8ae0c87 100644
--- a/vendor/github.com/xanzy/go-gitlab/services.go
+++ b/vendor/gitlab.com/gitlab-org/api/client-go/services.go
@@ -36,32 +36,36 @@ type ServicesService struct {
 //
 // GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html
 type Service struct {
-	ID int `json:"id"`
-	Title string `json:"title"`
-	Slug string `json:"slug"`
-	CreatedAt *time.Time `json:"created_at"`
-	UpdatedAt *time.Time `json:"updated_at"`
-	Active bool `json:"active"`
-	PushEvents bool `json:"push_events"`
-	IssuesEvents bool `json:"issues_events"`
-	AlertEvents bool `json:"alert_events"`
-	ConfidentialIssuesEvents bool `json:"confidential_issues_events"`
-	CommitEvents bool `json:"commit_events"`
-	MergeRequestsEvents bool `json:"merge_requests_events"`
-	CommentOnEventEnabled bool `json:"comment_on_event_enabled"`
-	TagPushEvents bool `json:"tag_push_events"`
-	NoteEvents bool `json:"note_events"`
-	ConfidentialNoteEvents bool `json:"confidential_note_events"`
-	PipelineEvents bool `json:"pipeline_events"`
-	JobEvents bool `json:"job_events"`
-	WikiPageEvents bool `json:"wiki_page_events"`
-	VulnerabilityEvents bool `json:"vulnerability_events"`
-	DeploymentEvents bool `json:"deployment_events"`
+	ID int `json:"id"`
+	Title string `json:"title"`
+	Slug string `json:"slug"`
+	CreatedAt *time.Time `json:"created_at"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	Active bool `json:"active"`
+	AlertEvents bool `json:"alert_events"`
+	CommitEvents bool `json:"commit_events"`
+	ConfidentialIssuesEvents bool `json:"confidential_issues_events"`
+	ConfidentialNoteEvents bool `json:"confidential_note_events"`
+	DeploymentEvents bool `json:"deployment_events"`
+	GroupConfidentialMentionEvents bool `json:"group_confidential_mention_events"`
+	GroupMentionEvents bool `json:"group_mention_events"`
+	IncidentEvents bool `json:"incident_events"`
+	IssuesEvents bool `json:"issues_events"`
+	JobEvents bool `json:"job_events"`
+	MergeRequestsEvents bool `json:"merge_requests_events"`
+	NoteEvents bool `json:"note_events"`
+	PipelineEvents bool `json:"pipeline_events"`
+	PushEvents bool `json:"push_events"`
+	TagPushEvents bool `json:"tag_push_events"`
+	VulnerabilityEvents bool `json:"vulnerability_events"`
+	WikiPageEvents bool `json:"wiki_page_events"`
+	CommentOnEventEnabled bool `json:"comment_on_event_enabled"`
+	Inherited bool `json:"inherited"`
 }
 
 // ListServices gets a list of all active services.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html#list-all-active-services
+// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html#list-all-active-integrations
 func (s *ServicesService) ListServices(pid interface{}, options ...RequestOptionFunc) ([]*Service, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -105,7 +109,7 @@ type CustomIssueTrackerServiceProperties struct {
 // GetCustomIssueTrackerService gets Custom Issue Tracker service settings for a project.
 //
 // GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-custom-issue-tracker-service-settings
+// https://docs.gitlab.com/ee/api/integrations.html#get-custom-issue-tracker-settings
 func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -131,39 +135,42 @@ func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options
 // options.
 //
 // GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-custom-issue-tracker-service
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker
 type SetCustomIssueTrackerServiceOptions struct {
 	NewIssueURL *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"`
 	IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"`
 	ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"`
-	Description *string `url:"description,omitempty" json:"description,omitempty"`
-	Title *string `url:"title,omitempty" json:"title,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
 }
 
 // SetCustomIssueTrackerService sets Custom Issue Tracker service for a project.
 //
 // GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-custom-issue-tracker-service
-func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker
+func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project))
 
 	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
-	return s.client.Do(req, nil)
+	svc := new(CustomIssueTrackerService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return svc, resp, nil
 }
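A minimal sketch against the updated SetCustomIssueTrackerService signature, which now returns the saved settings object instead of just a *Response (the tracker URLs are illustrative):

package example

import (
	gitlab "gitlab.com/gitlab-org/api/client-go"
)

// wireIssueTracker points a project at an external tracker and reads back the
// settings that the call now returns.
func wireIssueTracker(git *gitlab.Client, pid interface{}) (string, error) {
	svc, _, err := git.Services.SetCustomIssueTrackerService(pid, &gitlab.SetCustomIssueTrackerServiceOptions{
		NewIssueURL: gitlab.Ptr("https://tracker.example.com/new"),
		IssuesURL:   gitlab.Ptr("https://tracker.example.com/issues/:id"),
		ProjectURL:  gitlab.Ptr("https://tracker.example.com"),
	})
	if err != nil {
		return "", err
	}
	return svc.Title, nil
}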
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-custom-issue-tracker-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-a-custom-issue-tracker func (s *ServicesService) DeleteCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -204,7 +211,7 @@ type DataDogServiceProperties struct { // GetDataDogService gets DataDog service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-datadog-integration-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-datadog-settings func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestOptionFunc) (*DataDogService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -230,7 +237,7 @@ func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestO // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-datadog-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog type SetDataDogServiceOptions struct { APIKey *string `url:"api_key,omitempty" json:"api_key,omitempty"` APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` @@ -244,26 +251,32 @@ type SetDataDogServiceOptions struct { // SetDataDogService sets DataDog service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-datadog-integration -func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*Response, error) { +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog +func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*DataDogService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(DataDogService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteDataDogService deletes the DataDog service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-datadog-integration +// https://docs.gitlab.com/ee/api/integrations.html#disable-datadog func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -282,7 +295,7 @@ func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...Reque // DiscordService represents Discord service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#discord +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications type DiscordService struct { Service Properties *DiscordServiceProperties `json:"properties"` @@ -291,7 +304,7 @@ type DiscordService struct { // DiscordServiceProperties represents Discord specific properties. 
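The DataDog setter follows the same new pattern. A sketch under the same assumptions (placeholder credentials; only the APIKey and APIURL fields visible in this hunk are used):

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	svc, _, err := git.Services.SetDataDogService(42, &gitlab.SetDataDogServiceOptions{
		APIKey: gitlab.Ptr("datadog-api-key"), // placeholder key
		APIURL: gitlab.Ptr("https://api.datadoghq.eu"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("configured:", svc.Title)
}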
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#discord +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications type DiscordServiceProperties struct { BranchesToBeNotified string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` NotifyOnlyBrokenPipelines bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` @@ -300,7 +313,7 @@ type DiscordServiceProperties struct { // GetDiscordService gets Discord service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-discord-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-discord-notifications-settings func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestOptionFunc) (*DiscordService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -326,45 +339,66 @@ func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestO // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-discord-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications type SetDiscordServiceOptions struct { - WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialIssuesChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + GroupConfidentialMentionsEvents *bool `url:"group_confidential_mentions_events,omitempty" json:"group_confidential_mentions_events,omitempty"` + GroupConfidentialMentionsChannel *string `url:"group_confidential_mentions_channel,omitempty" 
json:"group_confidential_mentions_channel,omitempty"` + GroupMentionsEvents *bool `url:"group_mentions_events,omitempty" json:"group_mentions_events,omitempty"` + GroupMentionsChannel *string `url:"group_mentions_channel,omitempty" json:"group_mentions_channel,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` } // SetDiscordService sets Discord service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-discord-service -func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServiceOptions, options ...RequestOptionFunc) (*Response, error) { +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications +func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServiceOptions, options ...RequestOptionFunc) (*DiscordService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(DiscordService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil } // DeleteDiscordService deletes Discord service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-discord-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-discord-notifications func (s *ServicesService) DeleteDiscordService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -441,19 +475,25 @@ type SetDroneCIServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#set-up-drone -func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(DroneCIService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteDroneCIService deletes Drone CI service settings for a project. @@ -526,7 +566,7 @@ func (s *ServicesService) GetEmailsOnPushService(pid interface{}, options ...Req // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-emails-on-push-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push type SetEmailsOnPushServiceOptions struct { Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"` DisableDiffs *bool `url:"disable_diffs,omitempty" json:"disable_diffs,omitempty"` @@ -539,26 +579,32 @@ type SetEmailsOnPushServiceOptions struct { // SetEmailsOnPushService sets Emails on Push service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-emails-on-push-integration -func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*Response, error) { +// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push +func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*EmailsOnPushService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(EmailsOnPushService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteEmailsOnPushService deletes Emails on Push service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push-integration +// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push func (s *ServicesService) DeleteEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -594,7 +640,7 @@ type ExternalWikiServiceProperties struct { // GetExternalWikiService gets External Wiki service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-external-wiki-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-external-wiki-settings func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -620,7 +666,7 @@ func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...Req // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-external-wiki-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki type SetExternalWikiServiceOptions struct { ExternalWikiURL *string `url:"external_wiki_url,omitempty" json:"external_wiki_url,omitempty"` } @@ -628,26 +674,32 @@ type SetExternalWikiServiceOptions struct { // SetExternalWikiService sets External Wiki service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-external-wiki-service -func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*Response, error) { +// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki +func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(ExternalWikiService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteExternalWikiService deletes External Wiki service for project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-external-wiki-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-an-external-wiki func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -666,7 +718,7 @@ func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ... // GithubService represents Github service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#github-premium +// https://docs.gitlab.com/ee/api/integrations.html#github type GithubService struct { Service Properties *GithubServiceProperties `json:"properties"` @@ -675,7 +727,7 @@ type GithubService struct { // GithubServiceProperties represents Github specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#github-premium +// https://docs.gitlab.com/ee/api/integrations.html#github type GithubServiceProperties struct { RepositoryURL string `json:"repository_url"` StaticContext bool `json:"static_context"` @@ -684,7 +736,7 @@ type GithubServiceProperties struct { // GetGithubService gets Github service settings for a project. 
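Note the asymmetry the diff preserves: the setters now return the decoded integration, while the Delete* endpoints keep the old (*Response, error) shape. For instance, with a placeholder wiki URL:

package main

import (
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Three return values after this change.
	_, _, err = git.Services.SetExternalWikiService(42, &gitlab.SetExternalWikiServiceOptions{
		ExternalWikiURL: gitlab.Ptr("https://wiki.example.com"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Deletion is unchanged: still (*Response, error).
	if _, err := git.Services.DeleteExternalWikiService(42); err != nil {
		log.Fatal(err)
	}
}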
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-github-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-github-settings func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOptionFunc) (*GithubService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -710,7 +762,7 @@ func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOp // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-github-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-github type SetGithubServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty"` RepositoryURL *string `url:"repository_url,omitempty" json:"repository_url,omitempty"` @@ -720,26 +772,32 @@ type SetGithubServiceOptions struct { // SetGithubService sets Github service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-github-service -func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*Response, error) { +// https://docs.gitlab.com/ee/api/integrations.html#set-up-github +func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*GithubService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/github", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(GithubService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteGithubService deletes Github service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-github-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-github func (s *ServicesService) DeleteGithubService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -755,6 +813,109 @@ func (s *ServicesService) DeleteGithubService(pid interface{}, options ...Reques return s.client.Do(req, nil) } +// HarborService represents the Harbor service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#harbor +type HarborService struct { + Service + Properties *HarborServiceProperties `json:"properties"` +} + +// HarborServiceProperties represents Harbor specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#harbor +type HarborServiceProperties struct { + URL string `json:"url"` + ProjectName string `json:"project_name"` + Username string `json:"username"` + Password string `json:"password"` + UseInheritedSettings bool `json:"use_inherited_settings"` +} + +// GetHarborService gets Harbor service settings for a project. 
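The GitHub integration setter takes a GitHub token and repository URL for external commit statuses. A sketch using only the option fields visible in this hunk, with placeholder values:

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	svc, _, err := git.Services.SetGithubService(42, &gitlab.SetGithubServiceOptions{
		Token:         gitlab.Ptr("ghp_example"), // placeholder GitHub token
		RepositoryURL: gitlab.Ptr("https://github.com/example/repo"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("static context:", svc.Properties.StaticContext)
}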
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-harbor-settings +func (s *ServicesService) GetHarborService(pid interface{}, options ...RequestOptionFunc) (*HarborService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(HarborService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetHarborServiceOptions represents the available SetHarborService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-harbor +type SetHarborServiceOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + +// SetHarborService sets Harbor service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-harbor +func (s *ServicesService) SetHarborService(pid interface{}, opt *SetHarborServiceOptions, options ...RequestOptionFunc) (*HarborService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + svc := new(HarborService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil +} + +// DeleteHarborService deletes Harbor service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-harbor +func (s *ServicesService) DeleteHarborService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // SlackApplication represents GitLab for slack application settings. 
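The Harbor integration is new in this vendor drop and lives under projects/:id/integrations/harbor. A set-then-read sketch (Harbor URL, project name, and robot credentials are placeholders):

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	_, _, err = git.Services.SetHarborService(42, &gitlab.SetHarborServiceOptions{
		URL:                  gitlab.Ptr("https://harbor.example.com"),
		ProjectName:          gitlab.Ptr("library"),
		Username:             gitlab.Ptr("robot$gitlab"), // placeholder robot account
		Password:             gitlab.Ptr("robot-secret"), // placeholder secret
		UseInheritedSettings: gitlab.Ptr(false),
	})
	if err != nil {
		log.Fatal(err)
	}

	svc, _, err := git.Services.GetHarborService(42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("harbor project:", svc.Properties.ProjectName)
}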
// // GitLab API docs: @@ -769,22 +930,24 @@ type SlackApplication struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app type SlackApplicationProperties struct { - Channel string `json:"channel"` - NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` - BranchesToBeNotified string `json:"branches_to_be_notified"` - AlertEvents bool `json:"alert_events"` - IssuesEvents bool `json:"issues_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - NoteEvents bool `json:"note_events"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - DeploymentEvents bool `json:"deployment_events"` - IncidentsEvents bool `json:"incidents_events"` - PipelineEvents bool `json:"pipeline_events"` - PushEvents bool `json:"push_events"` - TagPushEvents bool `json:"tag_push_events"` - VulnerabilityEvents bool `json:"vulnerability_events"` - WikiPageEvents bool `json:"wiki_page_events"` + Channel string `json:"channel"` + NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + LabelsToBeNotified string `json:"labels_to_be_notified"` + LabelsToBeNotifiedBehavior string `json:"labels_to_be_notified_behavior"` + PushChannel string `json:"push_channel"` + IssueChannel string `json:"issue_channel"` + ConfidentialIssueChannel string `json:"confidential_issue_channel"` + MergeRequestChannel string `json:"merge_request_channel"` + NoteChannel string `json:"note_channel"` + ConfidentialNoteChannel string `json:"confidential_note_channel"` + TagPushChannel string `json:"tag_push_channel"` + PipelineChannel string `json:"pipeline_channel"` + WikiPageChannel string `json:"wiki_page_channel"` + DeploymentChannel string `json:"deployment_channel"` + IncidentChannel string `json:"incident_channel"` + VulnerabilityChannel string `json:"vulnerability_channel"` + AlertChannel string `json:"alert_channel"` // Deprecated: This parameter has been replaced with BranchesToBeNotified. 
NotifyOnlyDefaultBranch bool `json:"notify_only_default_branch"` @@ -822,22 +985,38 @@ func (s *ServicesService) GetSlackApplication(pid interface{}, options ...Reques // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app type SetSlackApplicationOptions struct { - Channel *string `url:"channel,omitempty" json:"channel,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - IncidentsEvents *bool `url:"incidents_events,omitempty" json:"incidents_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + IncidentsEvents *bool `url:"incidents_events,omitempty" json:"incidents_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + LabelsToBeNotified *string `url:"labels_to_be_notified,omitempty" json:"labels_to_be_notified,omitempty"` + LabelsToBeNotifiedBehavior *string 
`url:"labels_to_be_notified_behavior,omitempty" json:"labels_to_be_notified_behavior,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + IncidentChannel *string `url:"incident_channel,omitempty" json:"incident_channel,omitempty"` + VulnerabilityChannel *string `url:"vulnerability_channel,omitempty" json:"vulnerability_channel,omitempty"` + AlertChannel *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` // Deprecated: This parameter has been replaced with BranchesToBeNotified. NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` @@ -847,19 +1026,25 @@ type SetSlackApplicationOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app -func (s *ServicesService) SetSlackApplication(pid interface{}, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetSlackApplication(pid interface{}, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*SlackApplication, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(SlackApplication) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DisableSlackApplication disable the GitLab for Slack app integration for a project. 
@@ -1042,19 +1227,25 @@ type SetJenkinsCIServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#set-up-jenkins -func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(JenkinsCIService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteJenkinsCIService deletes Jenkins CI service for project. @@ -1090,12 +1281,25 @@ type JiraService struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#jira type JiraServiceProperties struct { - URL string `json:"url"` - APIURL string `json:"api_url"` - ProjectKey string `json:"project_key" ` - Username string `json:"username" ` - Password string `json:"password" ` - JiraIssueTransitionID string `json:"jira_issue_transition_id"` + URL string `json:"url"` + APIURL string `json:"api_url"` + Username string `json:"username" ` + Password string `json:"password" ` + Active bool `json:"active"` + JiraAuthType int `json:"jira_auth_type"` + JiraIssuePrefix string `json:"jira_issue_prefix"` + JiraIssueRegex string `json:"jira_issue_regex"` + JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"` + JiraIssueTransitionID string `json:"jira_issue_transition_id"` + CommitEvents bool `json:"commit_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + CommentOnEventEnabled bool `json:"comment_on_event_enabled"` + IssuesEnabled bool `json:"issues_enabled"` + ProjectKeys []string `json:"project_keys" ` + UseInheritedSettings bool `json:"use_inherited_settings"` + + // Deprecated: This parameter was removed in GitLab 17.0 + ProjectKey string `json:"project_key" ` } // UnmarshalJSON decodes the Jira Service Properties. 
@@ -1137,7 +1341,7 @@ func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOpti if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) if err != nil { @@ -1159,35 +1363,50 @@ func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOpti // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service type SetJiraServiceOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty" ` - Username *string `url:"username,omitempty" json:"username,omitempty" ` - Password *string `url:"password,omitempty" json:"password,omitempty" ` - Active *bool `url:"active,omitempty" json:"active,omitempty"` - JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"` - CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty" ` + Password *string `url:"password,omitempty" json:"password,omitempty" ` + Active *bool `url:"active,omitempty" json:"active,omitempty"` + JiraAuthType *int `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"` + JiraIssuePrefix *string `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"` + JiraIssueRegex *string `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"` + JiraIssueTransitionAutomatic *bool `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"` + JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"` + CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"` + IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` + ProjectKeys *[]string `url:"project_keys,comma,omitempty" json:"project_keys,omitempty" ` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` + + // Deprecated: This parameter was removed in GitLab 17.0 + ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty" ` } // SetJiraService sets Jira service for a project // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service -func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*JiraService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, 
err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(JiraService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteJiraService deletes Jira service for project. @@ -1199,7 +1418,7 @@ func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestO if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) if err != nil { @@ -1296,6 +1515,50 @@ type SetMattermostServiceOptions struct { WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` } +// SetMattermostService sets Mattermost service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service +func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*MattermostService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + svc := new(MattermostService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil +} + +// DeleteMattermostService deletes Mattermost service for project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#delete-mattermost-notifications-service +func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // MattermostSlashCommandsService represents Mattermost slash commands settings. 
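With the Jira hunks above, the endpoint moves from services/jira to integrations/jira, ProjectKey gives way to ProjectKeys (the singular form was removed upstream in GitLab 17.0), and jira_auth_type selects the credential type. A sketch with placeholder credentials:

package main

import (
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	_, _, err = git.Services.SetJiraService(42, &gitlab.SetJiraServiceOptions{
		URL:          gitlab.Ptr("https://jira.example.com"),
		Username:     gitlab.Ptr("gitlab-bot"),     // placeholder
		Password:     gitlab.Ptr("jira-api-token"), // placeholder
		JiraAuthType: gitlab.Ptr(0),                // 0 = Basic auth per GitLab docs
		ProjectKeys:  gitlab.Ptr([]string{"OPS", "DEV"}), // replaces deprecated ProjectKey
		CommitEvents: gitlab.Ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}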
// // GitLab API docs: @@ -1353,19 +1616,25 @@ type SetMattermostSlashCommandsServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-slash-command-integration -func (s *ServicesService) SetMattermostSlashCommandsService(pid interface{}, opt *SetMattermostSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetMattermostSlashCommandsService(pid interface{}, opt *SetMattermostSlashCommandsServiceOptions, options ...RequestOptionFunc) (*MattermostSlashCommandsService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(MattermostSlashCommandsService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteMattermostSlashCommandsService deletes Mattermost slash commands service for project. @@ -1387,44 +1656,6 @@ func (s *ServicesService) DeleteMattermostSlashCommandsService(pid interface{}, return s.client.Do(req, nil) } -// SetMattermostService sets Mattermost service for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service -func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteMattermostService deletes Mattermost service for project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-mattermost-notifications-service -func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - // MicrosoftTeamsService represents Microsoft Teams service settings. 
// // GitLab API docs: @@ -1501,18 +1732,25 @@ type SetMicrosoftTeamsServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service -func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + + svc := new(MicrosoftTeamsService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteMicrosoftTeamsService deletes Microsoft Teams service for project. @@ -1597,19 +1835,25 @@ type SetPipelinesEmailServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails -func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(PipelinesEmailService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeletePipelinesEmailService deletes Pipelines Email service settings for a project. @@ -1690,19 +1934,25 @@ type SetPrometheusServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service -func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPrometheusServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPrometheusServiceOptions, options ...RequestOptionFunc) (*PrometheusService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(PrometheusService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeletePrometheusService deletes Prometheus service settings for a project. @@ -1724,6 +1974,107 @@ func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...Re return s.client.Do(req, nil) } +// RedmineService represents the Redmine service settings. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#redmine
+type RedmineService struct {
+	Service
+	Properties *RedmineServiceProperties `json:"properties"`
+}
+
+// RedmineServiceProperties represents Redmine specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#redmine
+type RedmineServiceProperties struct {
+	NewIssueURL          string    `json:"new_issue_url"`
+	ProjectURL           string    `json:"project_url"`
+	IssuesURL            string    `json:"issues_url"`
+	UseInheritedSettings BoolValue `json:"use_inherited_settings"`
+}
+
+// GetRedmineService gets Redmine service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-redmine-settings
+func (s *ServicesService) GetRedmineService(pid interface{}, options ...RequestOptionFunc) (*RedmineService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(RedmineService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return svc, resp, nil
+}
+
+// SetRedmineServiceOptions represents the available SetRedmineService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-redmine
+type SetRedmineServiceOptions struct {
+	NewIssueURL          *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"`
+	ProjectURL           *string `url:"project_url,omitempty" json:"project_url,omitempty"`
+	IssuesURL            *string `url:"issues_url,omitempty" json:"issues_url,omitempty"`
+	UseInheritedSettings *bool   `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"`
+}
+
+// SetRedmineService sets Redmine service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-redmine
+func (s *ServicesService) SetRedmineService(pid interface{}, opt *SetRedmineServiceOptions, options ...RequestOptionFunc) (*RedmineService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(RedmineService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return svc, resp, nil
+}
+
+// DeleteRedmineService deletes Redmine service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-redmine
+func (s *ServicesService) DeleteRedmineService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
 // SlackService represents Slack service settings.
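A read-back sketch for the new Redmine integration above; note that UseInheritedSettings is decoded through the package's BoolValue wrapper (which tolerates string-encoded booleans), so convert it when a plain bool is needed:

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	svc, _, err := git.Services.GetRedmineService(42)
	if err != nil {
		log.Fatal(err)
	}

	// BoolValue handles "true"/"1"-style payloads; convert for plain use.
	inherited := bool(svc.Properties.UseInheritedSettings)
	fmt.Println("issues URL:", svc.Properties.IssuesURL, "inherited:", inherited)
}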
// // GitLab API docs: @@ -1823,19 +2174,25 @@ type SetSlackServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service -func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...RequestOptionFunc) (*SlackService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(SlackService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteSlackService deletes Slack service for project. @@ -1912,19 +2269,25 @@ type SetSlackSlashCommandsServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/13.12/ee/api/integrations.html#createedit-slack-slash-command-service -func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*SlackSlashCommandsService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(SlackSlashCommandsService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteSlackSlashCommandsService deletes Slack slash commands service for project. @@ -2015,19 +2378,25 @@ type SetTelegramServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram -func (s *ServicesService) SetTelegramService(pid interface{}, opt *SetTelegramServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetTelegramService(pid interface{}, opt *SetTelegramServiceOptions, options ...RequestOptionFunc) (*TelegramService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + svc := new(TelegramService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteTelegramService deletes Telegram service for project. 
@@ -2110,19 +2479,25 @@ type SetYouTrackServiceOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service -func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*Response, error) { +func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { project, err := parseID(pid) if err != nil { - return nil, err + return nil, nil, err } u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) + svc := new(YouTrackService) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(req, nil) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, nil, err + } + + return svc, resp, nil } // DeleteYouTrackService deletes YouTrack service settings for a project. diff --git a/vendor/gitlab.com/gitlab-org/api/client-go/settings.go b/vendor/gitlab.com/gitlab-org/api/client-go/settings.go new file mode 100644 index 00000000..0b8c0837 --- /dev/null +++ b/vendor/gitlab.com/gitlab-org/api/client-go/settings.go @@ -0,0 +1,954 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "net/http" + "time" +) + +// SettingsService handles communication with the application SettingsService +// related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html +type SettingsService struct { + client *Client +} + +// Settings represents the GitLab application settings. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html +// +// The available parameters have been modeled directly after the code, as the +// documentation seems to be inaccurate. 
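The new settings.go vendors the instance-level application-settings client. Assuming the usual accessor further down the file (a GetSettings method returning (*Settings, *Response, error), which this hunk does not show), reading a couple of the fields defined above looks like this sketch (base URL and admin token are placeholders):

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	// Instance-level settings require an administrator token.
	git, err := gitlab.NewClient("glpat-admin-token", gitlab.WithBaseURL("https://gitlab.example.com"))
	if err != nil {
		log.Fatal(err)
	}

	settings, _, err := git.Settings.GetSettings()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default branch:", settings.DefaultBranchName)
	fmt.Println("default project visibility:", settings.DefaultProjectVisibility)
}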
+// +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/settings.rb +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/entities/application_setting.rb#L5 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/app/helpers/application_settings_helper.rb#L192 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 +type Settings struct { + ID int `json:"id"` + AbuseNotificationEmail string `json:"abuse_notification_email"` + AdminMode bool `json:"admin_mode"` + AfterSignOutPath string `json:"after_sign_out_path"` + AfterSignUpText string `json:"after_sign_up_text"` + AkismetAPIKey string `json:"akismet_api_key"` + AkismetEnabled bool `json:"akismet_enabled"` + AllowAccountDeletion bool `json:"allow_account_deletion"` + AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` + AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` + AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` + AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` + AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` + ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` + ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` + AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` + AssetProxyEnabled bool `json:"asset_proxy_enabled"` + AssetProxyURL string `json:"asset_proxy_url"` + AssetProxySecretKey string `json:"asset_proxy_secret_key"` + AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` + AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` + AutoDevOpsDomain string `json:"auto_devops_domain"` + AutoDevOpsEnabled bool `json:"auto_devops_enabled"` + AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` + BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` + BulkImportEnabled bool `json:"bulk_import_enabled"` + BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` + CanCreateGroup bool `json:"can_create_group"` + CheckNamespacePlan bool `json:"check_namespace_plan"` + CIMaxIncludes int `json:"ci_max_includes"` + CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` + CommitEmailHostname string `json:"commit_email_hostname"` + ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` + ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` + ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` + ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` + ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` + ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` + ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` + ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` + ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` + 
ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` + ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` + ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` + ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` + ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` + ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` + CreatedAt *time.Time `json:"created_at"` + CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` + DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` + DSAKeyRestriction int `json:"dsa_key_restriction"` + DeactivateDormantUsers bool `json:"deactivate_dormant_users"` + DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` + DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` + DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` + DefaultBranchName string `json:"default_branch_name"` + DefaultBranchProtection int `json:"default_branch_protection"` + DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` + DefaultCiConfigPath string `json:"default_ci_config_path"` + DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` + DefaultPreferredLanguage string `json:"default_preferred_language"` + DefaultProjectCreation int `json:"default_project_creation"` + DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` + DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` + DefaultProjectsLimit int `json:"default_projects_limit"` + DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` + DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` + DelayedGroupDeletion bool `json:"delayed_group_deletion"` + DelayedProjectDeletion bool `json:"delayed_project_deletion"` + DeleteInactiveProjects bool `json:"delete_inactive_projects"` + DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` + DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` + DiagramsnetEnabled bool `json:"diagramsnet_enabled"` + DiagramsnetURL string `json:"diagramsnet_url"` + DiffMaxFiles int `json:"diff_max_files"` + DiffMaxLines int `json:"diff_max_lines"` + DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` + DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` + DisableFeedToken bool `json:"disable_feed_token"` + DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` + DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"` + DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` + DomainAllowlist []string `json:"domain_allowlist"` + DomainDenylist []string `json:"domain_denylist"` + DomainDenylistEnabled bool `json:"domain_denylist_enabled"` + DownstreamPipelineTriggerLimitPerProjectUserSHA int `json:"downstream_pipeline_trigger_limit_per_project_user_sha"` + DuoFeaturesEnabled bool `json:"duo_features_enabled"` + ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` + ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` + EKSAccessKeyID string `json:"eks_access_key_id"` + EKSAccountID string `json:"eks_account_id"` + EKSIntegrationEnabled bool `json:"eks_integration_enabled"` + EKSSecretAccessKey string 
`json:"eks_secret_access_key"` + Ed25519KeyRestriction int `json:"ed25519_key_restriction"` + Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` + ElasticsearchAWS bool `json:"elasticsearch_aws"` + ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` + ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` + ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` + ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` + ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` + ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` + ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` + ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` + ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` + ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` + ElasticsearchIndexing bool `json:"elasticsearch_indexing"` + ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` + ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` + ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` + ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"` + ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` + ElasticsearchPassword string `json:"elasticsearch_password"` + ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` + ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` + ElasticsearchReplicas int `json:"elasticsearch_replicas"` + ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"` + ElasticsearchSearch bool `json:"elasticsearch_search"` + ElasticsearchShards int `json:"elasticsearch_shards"` + ElasticsearchURL []string `json:"elasticsearch_url"` + ElasticsearchUsername string `json:"elasticsearch_username"` + ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"` + EmailAdditionalText string `json:"email_additional_text"` + EmailAuthorInBody bool `json:"email_author_in_body"` + EmailConfirmationSetting string `json:"email_confirmation_setting"` + EmailRestrictions string `json:"email_restrictions"` + EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` + EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"` + EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` + EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` + EnforcePATExpiration bool `json:"enforce_pat_expiration"` + EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` + EnforceTerms bool `json:"enforce_terms"` + ExternalAuthClientCert string `json:"external_auth_client_cert"` + ExternalAuthClientKey string `json:"external_auth_client_key"` + ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` + ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` + ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` + ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` + ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` + ExternalPipelineValidationServiceTimeout int 
`json:"external_pipeline_validation_service_timeout"` + ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` + ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` + FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"` + FileTemplateProjectID int `json:"file_template_project_id"` + FirstDayOfWeek int `json:"first_day_of_week"` + FlocEnabled bool `json:"floc_enabled"` + GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` + GeoStatusTimeout int `json:"geo_status_timeout"` + GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"` + GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` + GitalyTimeoutDefault int `json:"gitaly_timeout_default"` + GitalyTimeoutFast int `json:"gitaly_timeout_fast"` + GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` + GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"` + GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"` + GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"` + GitpodEnabled bool `json:"gitpod_enabled"` + GitpodURL string `json:"gitpod_url"` + GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` + GloballyAllowedIPs string `json:"globally_allowed_ips"` + GrafanaEnabled bool `json:"grafana_enabled"` + GrafanaURL string `json:"grafana_url"` + GravatarEnabled bool `json:"gravatar_enabled"` + GroupDownloadExportLimit int `json:"group_download_export_limit"` + GroupExportLimit int `json:"group_export_limit"` + GroupImportLimit int `json:"group_import_limit"` + GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` + GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` + HTMLEmailsEnabled bool `json:"html_emails_enabled"` + HashedStorageEnabled bool `json:"hashed_storage_enabled"` + HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` + HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` + HelpPageSupportURL string `json:"help_page_support_url"` + HelpPageText string `json:"help_page_text"` + HelpText string `json:"help_text"` + HideThirdPartyOffers bool `json:"hide_third_party_offers"` + HomePageURL string `json:"home_page_url"` + HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` + HousekeepingEnabled bool `json:"housekeeping_enabled"` + HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` + HousekeepingGcPeriod int `json:"housekeeping_gc_period"` + HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` + HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` + ImportSources []string `json:"import_sources"` + InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` + InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` + InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` + IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"` + InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` + InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` + IssuesCreateLimit int `json:"issues_create_limit"` + JiraConnectApplicationKey string `json:"jira_connect_application_key"` + 
JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"` + JiraConnectProxyURL string `json:"jira_connect_proxy_url"` + KeepLatestArtifact bool `json:"keep_latest_artifact"` + KrokiEnabled bool `json:"kroki_enabled"` + KrokiFormats map[string]bool `json:"kroki_formats"` + KrokiURL string `json:"kroki_url"` + LocalMarkdownVersion int `json:"local_markdown_version"` + LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"` + LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` + LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` + MailgunEventsEnabled bool `json:"mailgun_events_enabled"` + MailgunSigningKey string `json:"mailgun_signing_key"` + MaintenanceMode bool `json:"maintenance_mode"` + MaintenanceModeMessage string `json:"maintenance_mode_message"` + MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"` + MaxArtifactsSize int `json:"max_artifacts_size"` + MaxAttachmentSize int `json:"max_attachment_size"` + MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"` + MaxExportSize int `json:"max_export_size"` + MaxImportRemoteFileSize int `json:"max_import_remote_file_size"` + MaxImportSize int `json:"max_import_size"` + MaxLoginAttempts int `json:"max_login_attempts"` + MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` + MaxPagesSize int `json:"max_pages_size"` + MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` + MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` + MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"` + MaxYAMLDepth int `json:"max_yaml_depth"` + MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` + MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` + MinimumPasswordLength int `json:"minimum_password_length"` + MirrorAvailable bool `json:"mirror_available"` + MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` + MirrorMaxCapacity int `json:"mirror_max_capacity"` + MirrorMaxDelay int `json:"mirror_max_delay"` + NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` + NotesCreateLimit int `json:"notes_create_limit"` + NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` + NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"` + OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` + OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` + PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"` + PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"` + PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` + PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` + PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` + PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` + PasswordNumberRequired bool `json:"password_number_required"` + PasswordSymbolRequired bool `json:"password_symbol_required"` + PasswordUppercaseRequired bool `json:"password_uppercase_required"` + PasswordLowercaseRequired bool `json:"password_lowercase_required"` + PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` + 
PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` + PerformanceBarEnabled bool `json:"performance_bar_enabled"` + PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` + PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` + PlantumlEnabled bool `json:"plantuml_enabled"` + PlantumlURL string `json:"plantuml_url"` + PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` + PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` + PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` + ProjectDownloadExportLimit int `json:"project_download_export_limit"` + ProjectExportEnabled bool `json:"project_export_enabled"` + ProjectExportLimit int `json:"project_export_limit"` + ProjectImportLimit int `json:"project_import_limit"` + ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"` + ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` + ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"` + PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` + ProtectedCIVariables bool `json:"protected_ci_variables"` + PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` + PushEventActivitiesLimit int `json:"push_event_activities_limit"` + PushEventHooksLimit int `json:"push_event_hooks_limit"` + PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` + RSAKeyRestriction int `json:"rsa_key_restriction"` + RateLimitingResponseText string `json:"rate_limiting_response_text"` + RawBlobRequestLimit int `json:"raw_blob_request_limit"` + RecaptchaEnabled bool `json:"recaptcha_enabled"` + RecaptchaPrivateKey string `json:"recaptcha_private_key"` + RecaptchaSiteKey string `json:"recaptcha_site_key"` + ReceiveMaxInputSize int `json:"receive_max_input_size"` + ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"` + RememberMeEnabled bool `json:"remember_me_enabled"` + RepositoryChecksEnabled bool `json:"repository_checks_enabled"` + RepositorySizeLimit int `json:"repository_size_limit"` + RepositoryStorages []string `json:"repository_storages"` + RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` + RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` + RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"` + RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"` + RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` + RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` + RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` + SearchRateLimit int `json:"search_rate_limit"` + SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` + SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` + SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` + SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` + SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` + SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"` + SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"` + 
SecurityTXTContent string `json:"security_txt_content"` + SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` + SentryClientsideDSN string `json:"sentry_clientside_dsn"` + SentryDSN string `json:"sentry_dsn"` + SentryEnabled bool `json:"sentry_enabled"` + SentryEnvironment string `json:"sentry_environment"` + ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"` + SessionExpireDelay int `json:"session_expire_delay"` + SharedRunnersEnabled bool `json:"shared_runners_enabled"` + SharedRunnersMinutes int `json:"shared_runners_minutes"` + SharedRunnersText string `json:"shared_runners_text"` + SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` + SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` + SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` + SignInText string `json:"sign_in_text"` + SignupEnabled bool `json:"signup_enabled"` + SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"` + SilentModeEnabled bool `json:"silent_mode_enabled"` + SlackAppEnabled bool `json:"slack_app_enabled"` + SlackAppID string `json:"slack_app_id"` + SlackAppSecret string `json:"slack_app_secret"` + SlackAppSigningSecret string `json:"slack_app_signing_secret"` + SlackAppVerificationToken string `json:"slack_app_verification_token"` + SnippetSizeLimit int `json:"snippet_size_limit"` + SnowplowAppID string `json:"snowplow_app_id"` + SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` + SnowplowCookieDomain string `json:"snowplow_cookie_domain"` + SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"` + SnowplowEnabled bool `json:"snowplow_enabled"` + SourcegraphEnabled bool `json:"sourcegraph_enabled"` + SourcegraphPublicOnly bool `json:"sourcegraph_public_only"` + SourcegraphURL string `json:"sourcegraph_url"` + SpamCheckAPIKey string `json:"spam_check_api_key"` + SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"` + SpamCheckEndpointURL string `json:"spam_check_endpoint_url"` + StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"` + StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"` + SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` + TerminalMaxSessionTime int `json:"terminal_max_session_time"` + Terms string `json:"terms"` + ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` + ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` + ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` + ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` + ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` + ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"` + ThrottleAuthenticatedGitLFSPeriodInSeconds int 
`json:"throttle_authenticated_git_lfs_period_in_seconds"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` + ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` + ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` + ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` + ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` + ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` + ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` + ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` + ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` + ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` + ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` + ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` + ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` + ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` + ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` + ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` + ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` + ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` + ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` + ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` + ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` + TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + UnconfirmedUsersDeleteAfterDays 
int `json:"unconfirmed_users_delete_after_days"` + UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` + UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` + UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` + UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"` + UpdatedAt *time.Time `json:"updated_at"` + UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` + UsagePingEnabled bool `json:"usage_ping_enabled"` + UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"` + UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"` + UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"` + UserDefaultExternal bool `json:"user_default_external"` + UserDefaultInternalRegex string `json:"user_default_internal_regex"` + UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"` + UserOauthApplications bool `json:"user_oauth_applications"` + UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` + UsersGetByIDLimit int `json:"users_get_by_id_limit"` + UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` + ValidRunnerRegistrars []string `json:"valid_runner_registrars"` + VersionCheckEnabled bool `json:"version_check_enabled"` + WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` + WhatsNewVariant string `json:"whats_new_variant"` + WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` + + // Deprecated: Use AbuseNotificationEmail instead. + AdminNotificationEmail string `json:"admin_notification_email"` + // Deprecated: Use AllowLocalRequestsFromWebHooksAndServices instead. + AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"` + // Deprecated: Use AssetProxyAllowlist instead. + AssetProxyWhitelist []string `json:"asset_proxy_whitelist"` + // Deprecated: Use ThrottleUnauthenticatedWebEnabled or ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3) + ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"` + // Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. (Deprecated in GitLab 14.3) + ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"` + // Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3) + ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"` + // Deprecated: Replaced by SearchRateLimit in GitLab 14.9 (removed in 15.0). + UserEmailLookupLimit int `json:"user_email_lookup_limit"` +} + +// Settings requires a custom unmarshaller in order to properly unmarshal +// `container_registry_import_created_before` which is either a time.Time or +// an empty string if no value is set. +func (s *Settings) UnmarshalJSON(data []byte) error { + type Alias Settings + + raw := make(map[string]interface{}) + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + + // If empty string, remove the value to leave it nil in the response. 
+	if v, ok := raw["container_registry_import_created_before"]; ok && v == "" {
+		delete(raw, "container_registry_import_created_before")
+
+		data, err = json.Marshal(raw)
+		if err != nil {
+			return err
+		}
+	}
+
+	return json.Unmarshal(data, (*Alias)(s))
+}
+
+func (s Settings) String() string {
+	return Stringify(s)
+}
+
+// GetSettings gets the current application settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/settings.html#get-current-application-settings
+func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "application/settings", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	as := new(Settings)
+	resp, err := s.client.Do(req, as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
+// UpdateSettingsOptions represents the available UpdateSettings() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/settings.html#change-application-settings
+type UpdateSettingsOptions struct {
+	AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"`
+	AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"`
+	AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"`
+	AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
+	AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"`
+	AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"`
+	AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"`
+	AllowAccountDeletion *bool `url:"allow_account_deletion,omitempty" json:"allow_account_deletion,omitempty"`
+	AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"`
+	AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"`
+	AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"`
+	AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"`
+	AllowProjectCreationForGuestAndBelow *bool `url:"allow_project_creation_for_guest_and_below,omitempty" json:"allow_project_creation_for_guest_and_below,omitempty"`
+	AllowRunnerRegistrationToken *bool `url:"allow_runner_registration_token,omitempty" json:"allow_runner_registration_token,omitempty"`
+	ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"`
+	ASCIIDocMaxIncludes *int `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"`
+	AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"`
+	AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"`
+	AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"`
+	AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"`
+	AssetProxyWhitelist *[]string
`url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"` + AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"` + AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"` + AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"` + AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"` + BulkImportConcurrentPipelineBatchLimit *int `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"` + BulkImportEnabled *bool `url:"bulk_import_enabled,omitempty" json:"bulk_import_enabled,omitempty"` + BulkImportMaxDownloadFileSize *int `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"` + CIMaxIncludes *int `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"` + CIMaxTotalYAMLSizeBytes *int `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"` + CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"` + ConcurrentBitbucketImportJobsLimit *int `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"` + ConcurrentBitbucketServerImportJobsLimit *int `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"` + ConcurrentGitHubImportJobsLimit *int `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"` + ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"` + ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` + ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"` + ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"` + ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` + ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"` + ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` + ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" 
json:"container_registry_import_max_step_duration,omitempty"` + ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` + ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` + ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"` + ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` + CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"` + DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` + DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` + DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"` + DeactivateDormantUsersPeriod *int `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"` + DecompressArchiveFileTimeout *int `url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"` + DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` + DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"` + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"` + DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` + DefaultPreferredLanguage *string `url:"default_preferred_language,omitempty" json:"default_preferred_language,omitempty"` + DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` + DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"` + DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` + DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` + DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"` + DefaultSyntaxHighlightingTheme *int `url:"default_syntax_highlighting_theme,omitempty" json:"default_syntax_highlighting_theme,omitempty"` + DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"` + DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"` + DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"` + DeleteUnconfirmedUsers *bool `url:"delete_unconfirmed_users,omitempty" 
json:"delete_unconfirmed_users,omitempty"` + DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` + DiagramsnetEnabled *bool `url:"diagramsnet_enabled,omitempty" json:"diagramsnet_enabled,omitempty"` + DiagramsnetURL *string `url:"diagramsnet_url,omitempty" json:"diagramsnet_url,omitempty"` + DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` + DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` + DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` + DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"` + DisableAdminOAuthScopes *bool `url:"disable_admin_oauth_scopes,omitempty" json:"disable_admin_oauth_scopes,omitempty"` + DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"` + DisablePersonalAccessTokens *bool `url:"disable_personal_access_tokens,omitempty" json:"disable_personal_access_tokens,omitempty"` + DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"` + DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"` + DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"` + DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"` + DownstreamPipelineTriggerLimitPerProjectUserSHA *int `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"` + DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"` + ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` + ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` + EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"` + EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"` + EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"` + EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"` + Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` + Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` + ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` + ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` + ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` + ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` + ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"` + ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" 
json:"elasticsearch_analyzers_kuromoji_search,omitempty"` + ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"` + ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` + ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` + ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` + ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` + ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` + ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` + ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` + ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` + ElasticsearchMaxCodeIndexingConcurrency *int `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"` + ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` + ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"` + ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"` + ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` + ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` + ElasticsearchRequeueWorkers *bool `url:"elasticsearch_requeue_workers,omitempty" json:"elasticsearch_requeue_workers,omitempty"` + ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` + ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` + ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` + ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"` + ElasticsearchWorkerNumberOfShards *int `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"` + EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` + EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` + EmailConfirmationSetting *string `url:"email_confirmation_setting,omitempty" json:"email_confirmation_setting,omitempty"` + EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"` + EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"` + EnableArtifactExternalRedirectWarningPage *bool `url:"enable_artifact_external_redirect_warning_page,omitempty" 
json:"enable_artifact_external_redirect_warning_page,omitempty"` + EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` + EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"` + EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"` + EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"` + EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"` + ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"` + ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"` + ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"` + ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"` + ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` + ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` + ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` + ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` + ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"` + ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"` + FailedLoginAttemptsUnlockPeriodInMinutes *int `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"` + FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` + FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"` + GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` + GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` + GitRateLimitUsersAlertlist *[]string `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"` + GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` + GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` + GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` + GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` + GitlabDedicatedInstance *bool `url:"gitlab_dedicated_instance,omitempty" json:"gitlab_dedicated_instance,omitempty"` + 
GitlabEnvironmentToolkitInstance *bool `url:"gitlab_environment_toolkit_instance,omitempty" json:"gitlab_environment_toolkit_instance,omitempty"` + GitlabShellOperationLimit *int `url:"gitlab_shell_operation_limit,omitempty" json:"gitlab_shell_operation_limit,omitempty"` + GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"` + GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"` + GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"` + GloballyAllowedIPs *string `url:"globally_allowed_ips,omitempty" json:"globally_allowed_ips,omitempty"` + GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` + GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` + GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` + GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` + GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` + GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` + GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"` + GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` + HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"` + HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` + HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"` + HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"` + HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"` + HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"` + HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"` + HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` + HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` + HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` + HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` + HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` + HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` + HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` + HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"` + ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` + InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" 
json:"inactive_projects_delete_after_months,omitempty"` + InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"` + InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` + IncludeOptionalMetricsInServicePing *bool `url:"include_optional_metrics_in_service_ping,omitempty" json:"include_optional_metrics_in_service_ping,omitempty"` + InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"` + InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"` + IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` + JiraConnectApplicationKey *string `url:"jira_connect_application_key,omitempty" json:"jira_connect_application_key,omitempty"` + JiraConnectPublicKeyStorageEnabled *bool `url:"jira_connect_public_key_storage_enabled,omitempty" json:"jira_connect_public_key_storage_enabled,omitempty"` + JiraConnectProxyURL *string `url:"jira_connect_proxy_url,omitempty" json:"jira_connect_proxy_url,omitempty"` + KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` + KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"` + KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"` + KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"` + LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` + LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"` + LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"` + LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"` + MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"` + MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"` + MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"` + MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"` + MavenPackageRequestsForwarding *bool `url:"maven_package_requests_forwarding,omitempty" json:"maven_package_requests_forwarding,omitempty"` + MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` + MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` + MaxDecompressedArchiveSize *int `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"` + MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` + MaxImportRemoteFileSize *int `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"` + MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` + MaxLoginAttempts *int `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"` + MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" 
json:"max_number_of_repository_downloads,omitempty"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` + MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` + MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` + MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` + MaxTerraformStateSizeBytes *int `url:"max_terraform_state_size_bytes,omitempty" json:"max_terraform_state_size_bytes,omitempty"` + MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` + MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` + MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` + MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` + MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` + MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` + MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` + MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` + NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"` + NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` + NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"` + NugetSkipMetadataURLValidation *bool `url:"nuget_skip_metadata_url_validation,omitempty" json:"nuget_skip_metadata_url_validation,omitempty"` + OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"` + OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` + PackageMetadataPURLTypes *[]int `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"` + PackageRegistryAllowAnyoneToPullOption *bool `url:"package_registry_allow_anyone_to_pull_option,omitempty" json:"package_registry_allow_anyone_to_pull_option,omitempty"` + PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` + PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` + PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` + PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` + PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"` + PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"` + PasswordUppercaseRequired *bool 
`url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"` + PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"` + PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` + PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"` + PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` + PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"` + PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` + PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` + PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"` + PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` + PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"` + PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"` + ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` + ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` + ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` + ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` + ProjectJobsAPIRateLimit *int `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"` + ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"` + ProjectsAPIRateLimitUnauthenticated *int `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"` + PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"` + ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` + PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"` + PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` + PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` + PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"` + RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` + RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"` + RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` + RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` + 
RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` + RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` + ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` + ReceptiveClusterAgentsEnabled *bool `url:"receptive_cluster_agents_enabled,omitempty" json:"receptive_cluster_agents_enabled,omitempty"` + RememberMeEnabled *bool `url:"remember_me_enabled,omitempty" json:"remember_me_enabled,omitempty"` + RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` + RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` + RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"` + RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"` + RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"` + RequireAdminTwoFactorAuthentication *bool `url:"require_admin_two_factor_authentication,omitempty" json:"require_admin_two_factor_authentication,omitempty"` + RequirePersonalAccessTokenExpiry *bool `url:"require_personal_access_token_expiry,omitempty" json:"require_personal_access_token_expiry,omitempty"` + RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"` + RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"` + SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"` + SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"` + SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"` + SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"` + SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"` + SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"` + SecurityApprovalPoliciesLimit *int `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"` + SecurityPolicyGlobalGroupApproversEnabled *bool `url:"security_policy_global_group_approvers_enabled,omitempty" json:"security_policy_global_group_approvers_enabled,omitempty"` + SecurityTXTContent *string `url:"security_txt_content,omitempty" json:"security_txt_content,omitempty"` + SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"` + SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"` + SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"` + SentryEnabled 
*bool `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"` + SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"` + ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"` + SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` + SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` + SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` + SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"` + SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"` + SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"` + SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"` + SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"` + SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"` + SilentAdminExportsEnabled *bool `url:"silent_admin_exports_enabled,omitempty" json:"silent_admin_exports_enabled,omitempty"` + SilentModeEnabled *bool `url:"silent_mode_enabled,omitempty" json:"silent_mode_enabled,omitempty"` + SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"` + SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"` + SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"` + SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"` + SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"` + SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"` + SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"` + SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"` + SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"` + SnowplowDatabaseCollectorHostname *string `url:"snowplow_database_collector_hostname,omitempty" json:"snowplow_database_collector_hostname,omitempty"` + SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"` + SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"` + SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"` + SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"` + SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"` + SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"` + SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"` + StaticObjectsExternalStorageAuthToken *string `url:"static_objects_external_storage_auth_token,omitempty"
json:"static_objects_external_storage_auth_token,omitempty"` + StaticObjectsExternalStorageURL *string `url:"static_objects_external_storage_url,omitempty" json:"static_objects_external_storage_url,omitempty"` + SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"` + TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` + Terms *string `url:"terms,omitempty" json:"terms,omitempty"` + ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"` + ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"` + ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"` + ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"` + ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` + ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` + ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" 
json:"throttle_authenticated_web_period_in_seconds,omitempty"` + ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` + ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"` + ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` + ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"` + ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` + ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"` + ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_requests_per_period,omitempty" json:"throttle_protected_paths_requests_per_period,omitempty"` + ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"` + ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` +
ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` + ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` + ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` + ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"` + ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` + ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` + TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + UnconfirmedUsersDeleteAfterDays *int `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"` + UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` + UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` + UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` + UpdateRunnerVersionsEnabled *bool `url:"update_runner_versions_enabled,omitempty" json:"update_runner_versions_enabled,omitempty"` + UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"` + UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` + UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"` + UseClickhouseForAnalytics *bool `url:"use_clickhouse_for_analytics,omitempty" json:"use_clickhouse_for_analytics,omitempty"` + UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"` + UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` + UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` + UserDefaultsToPrivateProfile *bool `url:"user_defaults_to_private_profile,omitempty" json:"user_defaults_to_private_profile,omitempty"` + UserEmailLookupLimit *int 
`url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` + UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` + UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` + UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` + UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"` + ValidRunnerRegistrars *[]string `url:"valid_runner_registrars,omitempty" json:"valid_runner_registrars,omitempty"` + VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` + WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` + WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"` + WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"` +} + +// BranchProtectionDefaultsOptions represents default Git protected branch permissions options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type BranchProtectionDefaultsOptions struct { + AllowedToPush *[]int `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToMerge *[]int `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` +} + +// UpdateSettings updates the application settings. 
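Reviewer note: the options struct above pairs with the UpdateSettings method that follows. A minimal usage sketch against the renamed import path; the token, base URL, and chosen settings are placeholders, and gitlab.Ptr is the pointer helper this package already ships:

```go
package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	// Placeholder token and instance URL.
	git, err := gitlab.NewClient("glpat-placeholder",
		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"))
	if err != nil {
		log.Fatal(err)
	}

	// Every option field is a pointer tagged omitempty, so only the fields
	// set here end up in the PUT /application/settings payload.
	settings, _, err := git.Settings.UpdateSettings(&gitlab.UpdateSettingsOptions{
		SignupEnabled:     gitlab.Ptr(false),
		MaxAttachmentSize: gitlab.Ptr(100),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signup enabled: %v\n", settings.SignupEnabled)
}
```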
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/settings.html#change-application-settings +func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...RequestOptionFunc) (*Settings, *Response, error) { + req, err := s.client.NewRequest(http.MethodPut, "application/settings", opt, options) + if err != nil { + return nil, nil, err + } + + as := new(Settings) + resp, err := s.client.Do(req, as) + if err != nil { + return nil, resp, err + } + + return as, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go b/vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go rename to vendor/gitlab.com/gitlab-org/api/client-go/sidekiq_metrics.go diff --git a/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go b/vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go rename to vendor/gitlab.com/gitlab-org/api/client-go/snippet_repository_storage_move.go diff --git a/vendor/github.com/xanzy/go-gitlab/snippets.go b/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go similarity index 99% rename from vendor/github.com/xanzy/go-gitlab/snippets.go rename to vendor/gitlab.com/gitlab-org/api/client-go/snippets.go index 7c7c8084..3cb48277 100644 --- a/vendor/github.com/xanzy/go-gitlab/snippets.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/snippets.go @@ -50,6 +50,7 @@ type Snippet struct { } `json:"author"` UpdatedAt *time.Time `json:"updated_at"` CreatedAt *time.Time `json:"created_at"` + ProjectID int `json:"project_id"` WebURL string `json:"web_url"` RawURL string `json:"raw_url"` Files []struct { diff --git a/vendor/github.com/xanzy/go-gitlab/strings.go b/vendor/gitlab.com/gitlab-org/api/client-go/strings.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/strings.go rename to vendor/gitlab.com/gitlab-org/api/client-go/strings.go diff --git a/vendor/github.com/xanzy/go-gitlab/system_hooks.go b/vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/system_hooks.go rename to vendor/gitlab.com/gitlab-org/api/client-go/system_hooks.go diff --git a/vendor/github.com/xanzy/go-gitlab/tags.go b/vendor/gitlab.com/gitlab-org/api/client-go/tags.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/tags.go rename to vendor/gitlab.com/gitlab-org/api/client-go/tags.go diff --git a/vendor/github.com/xanzy/go-gitlab/time_stats.go b/vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/time_stats.go rename to vendor/gitlab.com/gitlab-org/api/client-go/time_stats.go diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/gitlab.com/gitlab-org/api/client-go/todos.go similarity index 99% rename from vendor/github.com/xanzy/go-gitlab/todos.go rename to vendor/gitlab.com/gitlab-org/api/client-go/todos.go index 52c218ee..2e26b707 100644 --- a/vendor/github.com/xanzy/go-gitlab/todos.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/todos.go @@ -82,6 +82,7 @@ type TodoTarget struct { Weight int `json:"weight"` // Only available for type MergeRequest + MergedAt *time.Time `json:"merged_at"` ApprovalsBeforeMerge int `json:"approvals_before_merge"` ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` MergeCommitSHA string 
`json:"merge_commit_sha"` diff --git a/vendor/github.com/xanzy/go-gitlab/topics.go b/vendor/gitlab.com/gitlab-org/api/client-go/topics.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/topics.go rename to vendor/gitlab.com/gitlab-org/api/client-go/topics.go diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/gitlab.com/gitlab-org/api/client-go/types.go similarity index 78% rename from vendor/github.com/xanzy/go-gitlab/types.go rename to vendor/gitlab.com/gitlab-org/api/client-go/types.go index 50e03ed5..df1231db 100644 --- a/vendor/github.com/xanzy/go-gitlab/types.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/types.go @@ -62,7 +62,7 @@ func AccessControl(v AccessControlValue) *AccessControlValue { // GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html type AccessLevelValue int -// List of available access levels +// List of available access levels. // // GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html const ( @@ -89,6 +89,11 @@ func AccessLevel(v AccessLevelValue) *AccessLevelValue { return Ptr(v) } +type AccessLevelDetails struct { + IntegerValue AccessLevelValue `json:"integer_value"` + StringValue string `json:"string_value"` +} + // UserIDValue represents a user ID value within GitLab. type UserIDValue string @@ -113,7 +118,7 @@ func ApproverIDs(v interface{}) *ApproverIDsValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -128,12 +133,12 @@ func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. func (a ApproverIDsValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. func (a *ApproverIDsValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -153,7 +158,7 @@ func AssigneeID(v interface{}) *AssigneeIDValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -164,12 +169,12 @@ func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. func (a AssigneeIDValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. func (a *AssigneeIDValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -189,7 +194,7 @@ func ReviewerID(v interface{}) *ReviewerIDValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -200,12 +205,12 @@ func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. 
func (a ReviewerIDValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. func (a *ReviewerIDValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -256,6 +261,33 @@ func BuildState(v BuildStateValue) *BuildStateValue { return Ptr(v) } +// CommentEventAction identifies if a comment has been newly created or updated. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events +type CommentEventAction string + +const ( + CommentEventActionCreate CommentEventAction = "create" + CommentEventActionUpdate CommentEventAction = "update" +) + +// ContainerRegistryStatus represents the status of a Container Registry. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories +type ContainerRegistryStatus string + +// ContainerRegistryStatus represents all valid statuses of a Container Registry. +// +// Undocumented, see code at: +// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/container_repository.rb?ref_type=heads#L35 +const ( + ContainerRegistryStatusDeleteScheduled ContainerRegistryStatus = "delete_scheduled" + ContainerRegistryStatusDeleteFailed ContainerRegistryStatus = "delete_failed" + ContainerRegistryStatusDeleteOngoing ContainerRegistryStatus = "delete_ongoing" +) + // DeploymentApprovalStatus represents a Gitlab deployment approval status. type DeploymentApprovalStatus string @@ -285,12 +317,43 @@ func DeploymentStatus(v DeploymentStatusValue) *DeploymentStatusValue { return Ptr(v) } -// EventTypeValue represents actions type for contribution events +// DORAMetricType represents all valid DORA metrics types. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetricType string + +// List of available DORA metric type names. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +const ( + DORAMetricDeploymentFrequency DORAMetricType = "deployment_frequency" + DORAMetricLeadTimeForChanges DORAMetricType = "lead_time_for_changes" + DORAMetricTimeToRestoreService DORAMetricType = "time_to_restore_service" + DORAMetricChangeFailureRate DORAMetricType = "change_failure_rate" +) + +// DORAMetricInterval represents the time period over which the +// metrics are aggregated. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetricInterval string + +// List of available DORA metric interval types. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +const ( + DORAMetricIntervalDaily DORAMetricInterval = "daily" + DORAMetricIntervalMonthly DORAMetricInterval = "monthly" + DORAMetricIntervalAll DORAMetricInterval = "all" +) + +// EventTypeValue represents actions type for contribution events. type EventTypeValue string -// List of available action type +// List of available action type. 
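The DORA metric and interval types introduced above are plain typed strings, so they drop straight into the DORA metrics options. A hedged sketch; the DORAMetrics service, GetProjectDORAMetrics, and GetDORAMetricsOptions are assumed from elsewhere in the library (they are not part of this hunk), and the project ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// GetProjectDORAMetrics and GetDORAMetricsOptions are assumed from the
	// library's DORA support; only the typed constants come from this hunk.
	metrics, _, err := git.DORAMetrics.GetProjectDORAMetrics(1234, gitlab.GetDORAMetricsOptions{
		Metric:   gitlab.Ptr(gitlab.DORAMetricDeploymentFrequency),
		Interval: gitlab.Ptr(gitlab.DORAMetricIntervalMonthly),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Printf("%s: %v\n", m.Date, m.Value)
	}
}
```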
// -// GitLab API docs: https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events +// GitLab API docs: +// https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events const ( CreatedEventType EventTypeValue = "created" UpdatedEventType EventTypeValue = "updated" @@ -305,10 +368,10 @@ const ( ExpiredEventType EventTypeValue = "expired" ) -// EventTargetTypeValue represents actions type value for contribution events +// EventTargetTypeValue represents actions type value for contribution events. type EventTargetTypeValue string -// List of available action type +// List of available action type. // // GitLab API docs: https://docs.gitlab.com/ee/api/events.html#target-types const ( @@ -323,7 +386,8 @@ const ( // FileActionValue represents the available actions that can be performed on a file. // -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions type FileActionValue string // The available file actions. @@ -376,10 +440,30 @@ func GenericPackageStatus(v GenericPackageStatusValue) *GenericPackageStatusValu return Ptr(v) } +// GroupHookTrigger represents the type of event to trigger for a group +// hook test. +type GroupHookTrigger string + +// List of available group hook trigger types. +const ( + GroupHookTriggerPush GroupHookTrigger = "push_events" + GroupHookTriggerTagPush GroupHookTrigger = "tag_push_events" + GroupHookTriggerIssue GroupHookTrigger = "issues_events" + GroupHookTriggerConfidentialIssue GroupHookTrigger = "confidential_issues_events" + GroupHookTriggerNote GroupHookTrigger = "note_events" + GroupHookTriggerMergeRequest GroupHookTrigger = "merge_requests_events" + GroupHookTriggerJob GroupHookTrigger = "job_events" + GroupHookTriggerPipeline GroupHookTrigger = "pipeline_events" + GroupHookTriggerWikiPage GroupHookTrigger = "wiki_page_events" + GroupHookTriggerRelease GroupHookTrigger = "releases_events" + GroupHookTriggerEmoji GroupHookTrigger = "emoji_events" + GroupHookTriggerResourceAccessToken GroupHookTrigger = "resource_access_token_events" +) + // ISOTime represents an ISO 8601 formatted date. type ISOTime time.Time -// ISO 8601 date format +// ISO 8601 date format. const iso8601 = "2006-01-02" // ParseISOTime parses an ISO 8601 formatted date. @@ -457,7 +541,7 @@ func (l *LabelOptions) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, (*alias)(l)) } -// EncodeValues implements the query.EncodeValues interface +// EncodeValues implements the query.EncodeValues interface. func (l *LabelOptions) EncodeValues(key string, v *url.Values) error { v.Set(key, strings.Join(*l, ",")) return nil @@ -629,6 +713,7 @@ const ( NoOneProjectCreation ProjectCreationLevelValue = "noone" MaintainerProjectCreation ProjectCreationLevelValue = "maintainer" DeveloperProjectCreation ProjectCreationLevelValue = "developer" + OwnerProjectCreation ProjectCreationLevelValue = "owner" ) // ProjectCreationLevel is a helper routine that allocates a new ProjectCreationLevelValue @@ -638,6 +723,46 @@ func ProjectCreationLevel(v ProjectCreationLevelValue) *ProjectCreationLevelValu return Ptr(v) } +// ProjectHookEvent represents a project hook event. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events +type ProjectHookEvent string + +// List of available project hook events. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events +const ( + ProjectHookEventPush ProjectHookEvent = "push_events" + ProjectHookEventTagPush ProjectHookEvent = "tag_push_events" + ProjectHookEventIssues ProjectHookEvent = "issues_events" + ProjectHookEventConfidentialIssues ProjectHookEvent = "confidential_issues_events" + ProjectHookEventNote ProjectHookEvent = "note_events" + ProjectHookEventMergeRequests ProjectHookEvent = "merge_requests_events" + ProjectHookEventJob ProjectHookEvent = "job_events" + ProjectHookEventPipeline ProjectHookEvent = "pipeline_events" + ProjectHookEventWiki ProjectHookEvent = "wiki_page_events" + ProjectHookEventReleases ProjectHookEvent = "releases_events" + ProjectHookEventEmoji ProjectHookEvent = "emoji_events" + ProjectHookEventResourceAccessToken ProjectHookEvent = "resource_access_token_events" +) + +// ResourceGroupProcessMode represents a process mode for a resource group +// within a GitLab project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes +type ResourceGroupProcessMode string + +// List of available resource group process modes. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes +const ( + Unordered ResourceGroupProcessMode = "unordered" + OldestFirst ResourceGroupProcessMode = "oldest_first" + NewestFirst ResourceGroupProcessMode = "newest_first" +) + // SharedRunnersSettingValue determines whether shared runners are enabled for a // group’s subgroups and projects. // @@ -654,7 +779,8 @@ const ( DisabledAndOverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_overridable" DisabledAndUnoverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_unoverridable" - // Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated in favor of DisabledAndOverridableSharedRunnersSettingValue + // Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated + // in favor of DisabledAndOverridableSharedRunnersSettingValue. DisabledWithOverrideSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_with_override" ) @@ -877,3 +1003,19 @@ func (t *BoolValue) UnmarshalJSON(b []byte) error { return err } } + +// CIPipelineVariablesMinimumOverrideRoleValue represents an access control +// value used for managing access to the CI Pipeline Variable Override feature. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +type CIPipelineVariablesMinimumOverrideRoleValue = string + +// List of available CIPipelineVariablesMinimumOverrideRoleValue values. 
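The resource group process modes above likewise feed the resource group API. A sketch under the assumption that the library exposes ResourceGroup.EditAnExistingResourceGroup and EditAnExistingResourceGroupOptions (neither is shown in this diff); the project ID and resource group key are placeholders:

```go
package main

import (
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// The service method and options struct are assumptions; only the
	// OldestFirst constant is introduced by this hunk.
	rg, _, err := git.ResourceGroup.EditAnExistingResourceGroup(1234, "production",
		&gitlab.EditAnExistingResourceGroupOptions{
			ProcessMode: gitlab.Ptr(gitlab.OldestFirst),
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("resource group %s now processes jobs %s", rg.Key, rg.ProcessMode)
}
```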
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +const ( + CIPipelineVariablesNoOneAllowedRole CIPipelineVariablesMinimumOverrideRoleValue = "no_one_allowed" + CiPipelineVariablesOwnerRole CIPipelineVariablesMinimumOverrideRoleValue = "owner" + CiPipelineVariablesMaintainerRole CIPipelineVariablesMinimumOverrideRoleValue = "maintainer" + CIPipelineVariablesDeveloperRole CIPipelineVariablesMinimumOverrideRoleValue = "developer" +) diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/gitlab.com/gitlab-org/api/client-go/users.go similarity index 85% rename from vendor/github.com/xanzy/go-gitlab/users.go rename to vendor/gitlab.com/gitlab-org/api/client-go/users.go index 5ad87ca0..9f65c48a 100644 --- a/vendor/github.com/xanzy/go-gitlab/users.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/users.go @@ -17,11 +17,15 @@ package gitlab import ( + "encoding/json" "errors" "fmt" + "io" "net" "net/http" "time" + + "github.com/hashicorp/go-retryablehttp" ) // List a couple of standard errors. @@ -52,11 +56,22 @@ type BasicUser struct { Username string `json:"username"` Name string `json:"name"` State string `json:"state"` + Locked bool `json:"locked"` CreatedAt *time.Time `json:"created_at"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` } +// ServiceAccount represents a GitLab service account. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/user_service_accounts.html +type ServiceAccount struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` +} + // User represents a GitLab user. // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html @@ -84,6 +99,7 @@ type User struct { LastActivityOn *ISOTime `json:"last_activity_on"` ColorSchemeID int `json:"color_scheme_id"` IsAdmin bool `json:"is_admin"` + IsAuditor bool `json:"is_auditor"` AvatarURL string `json:"avatar_url"` CanCreateGroup bool `json:"can_create_group"` CanCreateProject bool `json:"can_create_project"` @@ -104,6 +120,7 @@ type User struct { CustomAttributes []*CustomAttribute `json:"custom_attributes"` NamespaceID int `json:"namespace_id"` Locked bool `json:"locked"` + CreatedBy *BasicUser `json:"created_by"` } // UserIdentity represents a user identity. @@ -112,6 +129,23 @@ type UserIdentity struct { ExternUID string `json:"extern_uid"` } +// UserAvatar represents a GitLab user avatar. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html +type UserAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *UserAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias UserAvatar + return json.Marshal((*alias)(a)) +} + // ListUsersOptions represents the available ListUsers() options. 
// // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users @@ -188,37 +222,53 @@ func (s *UsersService) GetUser(user int, opt GetUsersOptions, options ...Request // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation type CreateUserOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` - ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"-"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` + SkipConfirmation *bool `url:"skip_confirmation,omitempty" 
json:"skip_confirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` } // CreateUser creates a new user. Note only administrators can create new users. // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "users", opt, options) + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "users", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "users", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } if err != nil { return nil, nil, err } @@ -236,30 +286,31 @@ func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOpti // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification type ModifyUserOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` - PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` - CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" 
json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` + SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` } // ModifyUser modifies an existing user. Only administrators can change attributes @@ -267,9 +318,23 @@ type ModifyUserOptions struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + var err error + var req *retryablehttp.Request u := fmt.Sprintf("users/%d", user) - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } if err != nil { return nil, nil, err } @@ -431,23 +496,24 @@ func (s *UsersService) GetUserAssociationsCount(user int, options ...RequestOpti // SSHKey represents a SSH key. // -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +// GitLab API docs: https://docs.gitlab.com/ee/api/user_keys.html#list-all-ssh-keys type SSHKey struct { ID int `json:"id"` Title string `json:"title"` Key string `json:"key"` CreatedAt *time.Time `json:"created_at"` ExpiresAt *time.Time `json:"expires_at"` + UsageType string `json:"usage_type"` } // ListSSHKeysOptions represents the available ListSSHKeys options. // -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +// GitLab API docs: https://docs.gitlab.com/ee/api/user_keys.html#list-all-ssh-keys type ListSSHKeysOptions ListOptions // ListSSHKeys gets a list of currently authenticated user's SSH keys. 
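The new Avatar fields route CreateUser and ModifyUser through UploadRequest instead of a plain JSON request whenever an avatar is supplied, as the branches above show. A usage sketch; the admin token, file name, and user details are placeholders:

```go
package main

import (
	"log"
	"os"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder admin token
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("avatar.png") // placeholder file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// With Avatar set, the client sends a multipart upload; with Avatar nil
	// it falls back to the ordinary JSON body.
	user, _, err := git.Users.CreateUser(&gitlab.CreateUserOptions{
		Email:    gitlab.Ptr("jane@example.com"),
		Username: gitlab.Ptr("jane"),
		Name:     gitlab.Ptr("Jane Doe"),
		Password: gitlab.Ptr("a-long-placeholder-password"),
		Avatar:   &gitlab.UserAvatar{Filename: "avatar.png", Image: f},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created user %d", user.ID)
}
```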
// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +// GitLab API docs: https://docs.gitlab.com/ee/api/user_keys.html#list-all-ssh-keys func (s *UsersService) ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { req, err := s.client.NewRequest(http.MethodGet, "user/keys", opt, options) if err != nil { @@ -1490,11 +1556,68 @@ func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options . return r, resp, nil } -// CreateServiceAccountUser creates a new service account user. Note only administrators can create new service account users. +// CreateServiceAccountUserOptions represents the available CreateServiceAccountUser() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/user_service_accounts.html#create-a-service-account-user +type CreateServiceAccountUserOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} + +// CreateServiceAccountUser creates a new service account user. + // -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#create-service-account-user -func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "service_accounts", nil, options) +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-service-account-user +func (s *UsersService) CreateServiceAccountUser(opts *CreateServiceAccountUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "service_accounts", opts, options) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// ListServiceAccounts lists all service accounts. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/user_service_accounts.html#list-all-service-account-users +func (s *UsersService) ListServiceAccounts(opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*ServiceAccount, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "service_accounts", opt, options) + if err != nil { + return nil, nil, err + } + + var sas []*ServiceAccount + resp, err := s.client.Do(req, &sas) + if err != nil { + return nil, resp, err + } + + return sas, resp, nil +} + +// UploadAvatar uploads an avatar to the current user.
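A short sketch of the expanded service account surface above; ListServiceAccountsOptions is declared elsewhere in the package (not in this hunk), so a zero-valued struct is assumed to be acceptable, and the token is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder admin token
	if err != nil {
		log.Fatal(err)
	}

	// The new options struct lets callers choose the bot's name and username
	// rather than accepting server-generated defaults.
	sa, _, err := git.Users.CreateServiceAccountUser(&gitlab.CreateServiceAccountUserOptions{
		Name:     gitlab.Ptr("Release Bot"),
		Username: gitlab.Ptr("release-bot"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created service account %d (%s)\n", sa.ID, sa.Username)

	// ListServiceAccountsOptions comes from elsewhere in the package; its
	// zero value is assumed here.
	accounts, _, err := git.Users.ListServiceAccounts(&gitlab.ListServiceAccountsOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range accounts {
		fmt.Println(a.ID, a.Username, a.Name)
	}
}
```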
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#upload-a-current-user-avatar +func (s *UsersService) UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) { + u := "user/avatar" + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/validate.go b/vendor/gitlab.com/gitlab-org/api/client-go/validate.go similarity index 84% rename from vendor/github.com/xanzy/go-gitlab/validate.go rename to vendor/gitlab.com/gitlab-org/api/client-go/validate.go index cb79ac83..f4aa11f4 100644 --- a/vendor/github.com/xanzy/go-gitlab/validate.go +++ b/vendor/gitlab.com/gitlab-org/api/client-go/validate.go @@ -44,10 +44,26 @@ type LintResult struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration type ProjectLintResult struct { - Valid bool `json:"valid"` - Errors []string `json:"errors"` - Warnings []string `json:"warnings"` - MergedYaml string `json:"merged_yaml"` + Valid bool `json:"valid"` + Errors []string `json:"errors"` + Warnings []string `json:"warnings"` + MergedYaml string `json:"merged_yaml"` + Includes []Include `json:"includes"` +} + +// Include contains the details about an include block in the .gitlab-ci.yml file. +// It is used in ProjectLintResult. +// +// Reference can be found at the lint API endpoint in the openapi yaml: +// https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/api/openapi/openapi_v2.yaml +type Include struct { + Type string `json:"type"` + Location string `json:"location"` + Blob string `json:"blob"` + Raw string `json:"raw"` + Extra map[string]interface{} `json:"extra"` + ContextProject string `json:"context_project"` + ContextSHA string `json:"context_sha"` } // LintOptions represents the available Lint() options. diff --git a/vendor/github.com/xanzy/go-gitlab/version.go b/vendor/gitlab.com/gitlab-org/api/client-go/version.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/version.go rename to vendor/gitlab.com/gitlab-org/api/client-go/version.go diff --git a/vendor/github.com/xanzy/go-gitlab/wikis.go b/vendor/gitlab.com/gitlab-org/api/client-go/wikis.go similarity index 100% rename from vendor/github.com/xanzy/go-gitlab/wikis.go rename to vendor/gitlab.com/gitlab-org/api/client-go/wikis.go diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 00000000..773c9b64 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. 
+This then ensures that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly for Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 00000000..088d19a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 00000000..ad73d8cb --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 00000000..af6ef171 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. 
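+//
+// For example (illustrative only):
+//
+//	a := String("service.name", "checkout")
+//	_ = a.Value.AsString() // "checkout"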
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 00000000..949e2165 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 00000000..e854d7e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty reports whether the trace ID consists entirely of zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from a hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty reports whether the span ID consists entirely of zero bytes.
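+//
+// For example (illustrative only):
+//
+//	SpanID{}.IsEmpty()  // true
+//	SpanID{1}.IsEmpty() // false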
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from a hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 00000000..29e629d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
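+// OTLP/JSON may encode 64-bit integers as JSON strings, so both forms below
+// decode to 42 (illustrative only):
+//
+//	var n protoUint64
+//	_ = json.Unmarshal([]byte(`"42"`), &n)
+//	_ = json.Unmarshal([]byte(`42`), &n)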
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 00000000..cecad8ba --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Resource type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Resource field: %#v", keyIface)
+		}
+
+		switch key {
+		case "attributes":
+			err = decoder.Decode(&r.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&r.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 00000000..b6f2e28d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+	Name         string `json:"name,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Attrs        []Attr `json:"attributes,omitempty"`
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Scope type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 00000000..a13a6b73 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. 
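+	//
+	// For example (illustrative only), the remote-parent bits can be read
+	// with the SpanFlags masks defined later in this file:
+	//
+	//	known := flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0
+	//	remote := flags&uint32(SpanFlagsContextIsRemoteMask) != 0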
+ Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. 
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. 
Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message was accepted
+	// by the broker while the logical processing of the message might span a much longer time.
+	SpanKindProducer SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type SpanEvent struct {
+	// time_unix_nano is the time the event occurred.
+	Time time.Time `json:"timeUnixNano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to non-empty string.
+	Name string `json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+	t := e.Time.UnixNano()
+	if e.Time.IsZero() || t < 0 {
+		t = 0
+	}
+
+	type Alias SpanEvent
+	return json.Marshal(struct {
+		Alias
+		Time uint64 `json:"timeUnixNano,omitempty"`
+	}{
+		Alias: Alias(e),
+		Time:  uint64(t),
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid SpanEvent type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 00000000..1217776e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 00000000..69a348f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. 
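+// A minimal illustrative round trip, decoding an OTLP/JSON payload:
+//
+//	var td Traces
+//	err := json.Unmarshal([]byte(`{"resourceSpans":[]}`), &td)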
+func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 00000000..0dd01b06 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused  // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for KindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for KindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for KindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
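+//
+// For example (illustrative only):
+//
+//	v := BytesValue([]byte{0xde, 0xad})
+//	_ = v.AsBytes()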
+func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. 
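+//
+// For example (illustrative only):
+//
+//	StringValue("x").Kind() // ValueKindString
+//	Value{}.Kind()          // ValueKindEmpty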
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a ValueKind is not handled. It is preferable to have
+		// users open an issue asking why their attributes have an
+		// "unhandled: " prefix than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
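+//
+// For example (illustrative only), a string Value marshals to the OTLP
+// AnyValue form:
+//
+//	v := StringValue("db")
+//	b, _ := v.MarshalJSON() // b == []byte(`{"stringValue":"db"}`)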
+func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 00000000..86babf1a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. 
Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+	for _, key := range keys {
+		strV := os.Getenv(key)
+		if strV == "" {
+			continue
+		}
+
+		v, err := strconv.Atoi(strV)
+		if err == nil {
+			return v
+		}
+		slog.Warn(
+			"invalid limit environment variable",
+			"error", err,
+			"key", key,
+			"value", strV,
+		)
+	}
+
+	return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 00000000..6ebea12a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
+
+	"go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+	noop.Span
+
+	spanContext trace.SpanContext
+	sampled     atomic.Bool
+
+	mu     sync.Mutex
+	traces *telemetry.Traces
+	span   *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	// s.spanContext is immutable, do not acquire lock s.mu.
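+	// It is assigned once in tracer.Start, before the span is returned to
+	// the caller, and is never written again.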
+ return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) 
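+	// Note: elements of string slices are truncated to maxSpan.AttrValueLen
+	// in the case below, mirroring the scalar STRING case above.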
+	case attribute.STRINGSLICE:
+		slice := value.AsStringSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			v = truncate(maxSpan.AttrValueLen, v)
+			out = append(out, telemetry.StringValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	}
+	return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	// - Short values less than the default limit (128).
+	// - Strings with valid encodings that exceed the limit.
+	// - No limit.
+	// - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+			i++
+			continue
+		}
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
+	}
+
+	return b.String()
+}
+
+func (s *span) End(opts ...trace.SpanEndOption) {
+	if s == nil || !s.sampled.Swap(false) {
+		return
+	}
+
+	// s.end exists so the lock (s.mu) is not held while s.ended is called.
+	s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	cfg := trace.NewSpanEndConfig(opts...)
+	if t := cfg.Timestamp(); !t.IsZero() {
+		s.span.EndTime = cfg.Timestamp()
+	} else {
+		s.span.EndTime = time.Now()
+	}
+
+	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+	return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.sampled.Load() {
+		return
+	}
+
+	cfg := trace.NewEventConfig(opts...)
+
+	attrs := cfg.Attributes()
+	attrs = append(attrs,
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	)
+	if cfg.StackTrace() {
+		buf := make([]byte, 2048)
+		n := runtime.Stack(buf, false)
+		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
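+		// For example, an error created by errors.New is a *errors.errorString;
+		// pointer types have an empty PkgPath and Name, so t.String() yields
+		// "*errors.errorString" here.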
+ return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 00000000..cbcfabde --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) 
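+		// Building the OTLP representation is the expensive part of Start;
+		// unsampled spans skip it and remain cheap no-op shells.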
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 00000000..dbc477a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
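+	// The returned tracer is a plain value identified only by its name,
+	// version, and schema URL; no per-tracer state is cached here.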
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9..6bf3abc4 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb..e2cb3ea9 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664..ae8577ef 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f569..00000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f3..ce3f40b6 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,11 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -19,10 +22,16 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck + - tenv + - testifylint - typecheck + - unconvert - unused + - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -54,16 +63,17 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + - perfsprint + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -88,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. 
otlp-internal:
        files:
          - "!**/exporters/otlp/internal/**/*.go"
@@ -120,10 +137,10 @@ linters-settings:
       - "**/metric/**/*.go"
       - "**/bridge/*.go"
       - "**/bridge/**/*.go"
-      - "**/example/*.go"
-      - "**/example/**/*.go"
       - "**/trace/*.go"
       - "**/trace/**/*.go"
+      - "**/log/*.go"
+      - "**/log/**/*.go"
       deny:
         - pkg: "go.opentelemetry.io/otel/internal$"
           desc: Do not use cross-module internal packages.
@@ -147,6 +164,12 @@ linters-settings:
     locale: US
     ignore-words:
       - cancelled
+  perfsprint:
+    err-error: true
+    errorf: true
+    int-conversion: true
+    sprintf1: true
+    strconcat: true
   revive:
     # Sets the default failure confidence.
     # This means that linting errors with less than 0.8 confidence will be ignored.
@@ -294,3 +317,9 @@ linters-settings:
       # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
      - name: waitgroup-by-value
        disabled: false
+  testifylint:
+    enable-all: true
+    disable:
+      - float-compare
+      - go-require
+      - require-error
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 98f2d204..599d59cd 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,353 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ## [Unreleased]
 
+<!-- Released section -->
+<!-- Don't change this section unless doing release -->
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
+## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
+
+### Added
+
+- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
+- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
+  This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
+  Users can use it to avoid performing computationally expensive operations when recording measurements.
+  It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016)
+
+### Changed
+
+- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
+  See that package for more information. (#5920)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
+- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
+- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
+
+### Fixed
+
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
(#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. 
(#5804)
+
+### Fixed
+
+- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
+- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
+
+### Removed
+
+- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
+- Add `WithExportBufferSize` option to log batch processor. (#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope.
(#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. 
(#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. 
(#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. 
(#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+  - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+  - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
(#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+  This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+  This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+  The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+  This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+  This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+  At which point, users will be required to migrate their code, and this package will be deprecated then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters.
(#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 This release is the last to support [Go 1.20]. @@ -22,6 +369,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -138,7 +486,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. 
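As a hedged sketch of the migration this note describes (the package and type names below, such as mytrace and myTracerProvider, are hypothetical), a custom implementation embeds the corresponding embedded type; if the trace API later gains methods, the compile-time assertion below fails instead of the implementation silently misbehaving:

package mytrace

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
)

// myTracerProvider is a hypothetical custom trace API implementation.
// Embedding embedded.TracerProvider satisfies the expanded
// trace.TracerProvider interface and selects "fail to compile on API
// extension" as the default behavior.
type myTracerProvider struct {
	embedded.TracerProvider
}

// Tracer returns this implementation's tracer; the real body is elided in
// this sketch.
func (p *myTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return nil // placeholder only
}

// Compile-time check that the interface remains satisfied.
var _ trace.TracerProvider = (*myTracerProvider)(nil)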
### Added @@ -170,15 +518,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -814,7 +1162,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1682,7 +2030,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. 
(#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -1859,7 +2207,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API related to the OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2256,7 +2604,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -2849,7 +3197,17 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project.
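The `SpanContextFromContext`/`IsRemote` replacement noted in the #1731 entry above reads naturally as a short helper. This is a sketch, not project code; the `logRemoteness` name is invented for illustration.

```go
package telemetry

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// logRemoteness shows the pattern that replaced RemoteSpanContextFromContext:
// read the current SpanContext from the context, then ask it IsRemote.
func logRemoteness(ctx context.Context) {
	sc := trace.SpanContextFromContext(ctx)
	fmt.Printf("valid=%t remote=%t\n", sc.IsValid(), sc.IsRemote())
}
```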
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 @@ -2928,6 +3286,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 @@ -2937,3 +3298,5 @@ It contains api and sdk for trace and meter. [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d9..945a07d2 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac5..22a2e9db 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. 
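For readers unfamiliar with the documentation style the contributing guide holds up as an example, a sketch of the shape it describes may help; the package and type below are invented for illustration, not taken from the project.

```go
// Package sample illustrates the recommended Go doc style: a package
// summary sentence first, followed by any details a user needs.
package sample

// Limiter bounds how many telemetry exports may run concurrently.
//
// The zero value imposes no bound. Limiter is safe for concurrent use.
type Limiter struct {
	// max is the concurrency bound; zero means unbounded.
	max int
}
```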
+### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,12 +570,18 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -613,31 +629,34 @@ should be canceled. ## Approvers and Maintainers -### Approvers +### Triagers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +### Approvers ### Maintainers +- [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6de95219..a7f6d8cc 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -68,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -81,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +93,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +107,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." \ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. 
.PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -159,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,7 +163,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -192,40 +177,37 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,25 +216,35 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... 
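To connect the `test-concurrent-safe` target above with the `ConcurrentSafe` naming convention from CONTRIBUTING.md, here is a hypothetical test that the `-run=ConcurrentSafe -count=100 -race` invocation would pick up; the test body is an assumption for illustration only.

```go
package attribute_test

import (
	"sync"
	"testing"

	"go.opentelemetry.io/otel/attribute"
)

// TestSetConcurrentSafe carries the ConcurrentSafe term in its root name,
// so the test-concurrent-safe target runs it 100 times under -race.
func TestSetConcurrentSafe(t *testing.T) {
	set := attribute.NewSet(attribute.String("k", "v"))
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = set.Equivalent() // concurrent reads must not race
		}()
	}
	wg.Wait() // the test must not leak goroutines
}
```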
.PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -263,15 +255,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +267,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +283,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 7766259a..d9a19207 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -47,23 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. 
-| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -90,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export @@ -100,12 +99,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). -| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0b..4ebef4f9 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions @@ -63,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[<new-version>] - <date-of-release>`).
+ - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. @@ -104,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `<new tag>` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - ## Post-Release ### Contrib Repository @@ -134,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e36..b8cb605c 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 00000000..5b3da8f1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424..eef51ebc 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc576..318e42fc 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d..be9cd922 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271f..f2ba89ce 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e..d9a22c65 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce0..3028f9a4 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index fb6da514..6cbefcea 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" - "sync" ) type ( @@ -26,23 +16,33 @@ type ( // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. 
+ // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to ensure equivalence stability: comparisons + // will return the same value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -56,12 +56,6 @@ var ( iface: [0]KeyValue{}, }, } - - // sortables is a pool of Sortables used to create Sets when a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } ) // EmptySet returns a reference to a Set with no elements. @@ -187,13 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set.
Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. 
The index for the first KeyValue to be kept is @@ -368,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c..9ea0ecbb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 00000000..7d798435 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77..0e1fe242 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" @@ -19,6 +8,7 @@ import ( "fmt" "net/url" "strings" + "unicode/utf8" "go.opentelemetry.io/otel/internal/baggage" ) @@ -54,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be a valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property keys. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -67,11 +63,15 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -84,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property keys. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -125,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -148,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key is invalid per the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -213,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignore empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -232,14 +257,18 @@ type Member struct { hasData bool } -// NewMemberRaw returns a new Member from the passed arguments. +// NewMember returns a new Member from the passed arguments. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewMemberRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -252,7 +281,13 @@ // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage keys. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -304,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } - // Decode a precent-encoded value. - value, err := url.PathUnescape(val) + // Decode a percent-encoded value. + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(c) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error { @@ -324,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -341,13 +405,18 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key is invalid per the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -458,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -479,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -538,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It ignores members where the member key is invalid per the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignore empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -616,11 +693,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + // Decode a percent-encoded value.
+ rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -641,6 +720,113 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 string. +// The empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,19 +842,10 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value.
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { @@ -679,12 +856,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b75..a572461a 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100d..b51d87ca 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 00000000..24c52b38 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4..49a35b12 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,21 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -74,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} @@ -94,7 +84,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb..ee8db448 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
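For context on the UnmarshalJSON hunk above: codes.Code accepts both the string name and the numeric form. A short usage sketch (the output comments assume the standard Unset=0, Error=1, Ok=2 mapping):

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	var c codes.Code
	_ = json.Unmarshal([]byte(`"Error"`), &c) // by name
	fmt.Println(c)                            // Error
	_ = json.Unmarshal([]byte(`2`), &c)       // by number
	fmt.Println(c)                            // Ok
}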
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e..921f8596 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of @@ -28,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad854..67414c71 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d..93e80ea3 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3b..07623b67 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3..691d96c7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provide several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. 
func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. @@ -60,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -74,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -88,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. 
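Both directions of the attribute conversion now share one shape: reflect.Copy between a slice and an equally sized array value. A distilled, runnable sketch of the two patterns (sliceToArray and arrayToSlice are illustrative names):

package main

import (
	"fmt"
	"reflect"
)

// sliceToArray mirrors the rewritten *SliceValue helpers: allocate an
// array value of matching length, then reflect.Copy the slice into it.
func sliceToArray(v []int64) interface{} {
	var zero int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

// arrayToSlice mirrors the rewritten As*Slice helpers: a plain make plus
// reflect.Copy, guarded so a zero-length copy is skipped.
func arrayToSlice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	arr := sliceToArray([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", arr, arr) // [3]int64 [1 2 3]
	fmt.Println(arrayToSlice(arr))  // [1 2 3]
}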
@@ -102,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408..b4f85f44 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d..3aea9c49 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e..4259f032 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b8304..c657ff8e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. 
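The new ErrDelegator.Handle collapses the old delegator-plus-logger pair into a single type: an atomic.Pointer to the delegate, with a logging fallback when no delegate is set. A self-contained sketch of that pattern (delegator, handler, and printHandler are illustrative names):

package main

import (
	"errors"
	"log"
	"sync/atomic"
)

type handler interface{ Handle(error) }

// delegator holds its delegate behind an atomic.Pointer; until a delegate
// is registered, Handle falls back to the standard logger.
type delegator struct {
	delegate atomic.Pointer[handler]
}

func (d *delegator) Handle(err error) {
	if h := d.delegate.Load(); h != nil {
		(*h).Handle(err)
		return
	}
	log.Print(err)
}

func (d *delegator) setDelegate(h handler) { d.delegate.Store(&h) }

type printHandler struct{}

func (printHandler) Handle(err error) { log.Printf("delegated: %v", err) }

func main() {
	var d delegator
	d.Handle(errors.New("logged via fallback"))
	d.setDelegate(printHandler{})
	d.Handle(errors.New("now delegated"))
}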
-// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c20..ae92a425 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -24,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -51,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -82,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -113,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -144,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -175,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -206,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type 
sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2..adbca7d3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. 
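globalLogger above replaces an init() function with an immediately invoked initializer, so the default logger exists as soon as the package-level variable does. The same shape in isolation, assuming the go-logr/logr and go-logr/stdr modules this code already imports:

package main

import (
	"log"
	"os"
	"sync/atomic"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

// defaultLogger is populated during package initialization, before any
// init() function runs, because the composite initializer executes first.
var defaultLogger = func() *atomic.Pointer[logr.Logger] {
	l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	p := new(atomic.Pointer[logr.Logger])
	p.Store(&l)
	return p
}()

func main() {
	(*defaultLogger.Load()).Info("ready")
}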
+func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e..a6acd8dc 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,23 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -76,6 +66,8 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -86,7 +78,7 @@ return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -102,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of an instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider.
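Each instrument constructor below computes an instID and consults the instruments map, so repeated requests for an identical instrument return the same cached placeholder instead of appending to a list. The deduplication shape in miniature (counter and getCounter are illustrative):

package main

import (
	"fmt"
	"reflect"
)

type instID struct {
	name, description, unit string
	kind                    reflect.Type // distinguishes instrument kinds
}

type counter struct{ name string }

var instruments = map[instID]*counter{}

func getCounter(name, description, unit string) *counter {
	id := instID{name: name, description: description, unit: unit,
		kind: reflect.TypeOf((*counter)(nil))}
	if c, ok := instruments[id]; ok {
		return c // identical requests share one placeholder
	}
	c := &counter{name: name}
	instruments[id] = c
	return c
}

func main() {
	a := getCounter("requests", "", "1")
	b := getCounter("requests", "", "1")
	fmt.Println(a == b) // true
}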
// @@ -120,12 +124,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -143,147 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) } + + cfg := metric.NewInt64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -295,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -323,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. 
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -334,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c..38560ff9 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 386c8bfd..204ea142 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. 
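The errorHandlerHolder and delegateErrorHandlerOnce machinery above preserves the documented contract: handlers returned before the first SetErrorHandler call forward to the new handler, while later calls simply replace the global. A hedged usage sketch against the public otel package:

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

type stderrHandler struct{}

func (stderrHandler) Handle(err error) { log.Printf("otel error: %v", err) }

func main() {
	// Errors handled before this call fall back to the default logger;
	// afterwards they are routed to stderrHandler.
	otel.SetErrorHandler(stderrHandler{})
	otel.Handle(errors.New("boom"))
}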
func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12..8982aa0d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -36,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -97,6 +87,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -115,6 +107,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T type il struct { name string version string + schema string + attrs attribute.Set } // tracer is a placeholder for a trace.Tracer. @@ -152,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to set this value to true when it +// attaches. By default, it points to false, meaning a tracer returns a +// nonRecordingSpan. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that the pointer can be reliably determined; + // it should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...)
+ } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s @@ -193,6 +211,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e7940..b2fe3e41 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" @@ -31,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -47,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5..6de7f2e4 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
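The raw helpers reinterpret bit patterns rather than convert values, which is why the new comments wave off gosec's overflow warnings. A short demonstration of both round-trips:

package main

import (
	"fmt"
	"math"
)

func main() {
	// float64 round-trips through its IEEE 754 bit pattern.
	f := -1.5
	fmt.Println(math.Float64frombits(math.Float64bits(f)) == f) // true

	// int64 round-trips through two's-complement casts: uint64(i) does not
	// change any bits, so casting back recovers the original value.
	i := int64(-42)
	fmt.Println(int64(uint64(i)) == i) // true
}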
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f9551719..1e6473b3 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 00000000..0cf902e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e..f8435d8f 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string @@ -224,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. 
+// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf0..e079aaef 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -223,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d7..d9e3b13e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13..f153745b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
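Both callback doc fixes above (Float64Observerable → Float64Observable, Int64Observerable → Int64Observable) describe the same registration pattern for asynchronous instruments. A hedged sketch of wiring an Int64Callback at instrument creation, assuming the standard metric API entry points; the instrument name and observed value are made up:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/async")

	// The callback runs once per collection cycle; per the doc comment it
	// must finish in finite time and honor ctx's deadline.
	_, err := meter.Int64ObservableCounter(
		"example.requests.handled", // hypothetical instrument name
		metric.WithDescription("Requests handled since process start."),
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(42) // placeholder measurement
			return nil
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```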
+// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. +[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 00000000..1f6e0efa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2..1a9dc680 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. 
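The new Instrument Name section above pins down the allowed syntax but notes that not every API implementation validates it. One way to express those rules as a single check, assuming a plain regular expression is acceptable (this helper is illustrative, not part of the API):

```go
package main

import (
	"fmt"
	"regexp"
)

// instrumentNameRe encodes the documented rules: non-empty, an alphabetic
// first character, then up to 254 further characters drawn from letters,
// digits, '_', '.', '-', and '/' (255 total maximum).
var instrumentNameRe = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

func validInstrumentName(name string) bool {
	return instrumentNameRe.MatchString(name)
}

func main() {
	fmt.Println(validInstrumentName("http.server.request.duration")) // true
	fmt.Println(validInstrumentName("1st-metric"))                   // false: starts with a digit
}
```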
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd53..a535782e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c @@ -340,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74..14e08c24 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
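The instrument.go hunk ends by correcting the doc example for WithAttributeSet (it previously named WithAttributes). A short sketch of the two equivalent call forms, assuming a counter already obtained from a Meter; the attribute names are illustrative:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func record(ctx context.Context, counter metric.Int64Counter) {
	// Preferred when the set can be built once and reused: no per-call
	// copy or deduplication work.
	set := attribute.NewSet(
		attribute.String("http.method", "GET"),
		attribute.Int("http.status_code", 200),
	)
	counter.Add(ctx, 1, metric.WithAttributeSet(set))

	// Equivalent result, but constructs (and copies) a set on every call,
	// which is the trade-off the doc comment is warning about.
	counter.Add(ctx, 1, metric.WithAttributes(
		attribute.String("http.method", "GET"),
		attribute.Int("http.status_code", 200),
	))
}
```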
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,41 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,7 +91,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -88,7 +106,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -98,23 +121,51 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -124,7 +175,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -134,7 +190,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -144,6 +205,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -189,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae..8403a4ba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. 
type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32..783fdfba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
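syncfloat64.go above introduces the synchronous Float64Gauge along with its config and option plumbing. A hedged usage sketch, assuming the matching Meter.Float64Gauge addition from meter.go; the instrument name and unit are made up:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/gauge")

	temp, err := meter.Float64Gauge(
		"example.cpu.temperature", // hypothetical name
		metric.WithUnit("Cel"),
		metric.WithDescription("Last sampled CPU temperature."),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Unlike the observable gauge, values are recorded synchronously at
	// the call site instead of from a collection-time callback.
	temp.Record(context.Background(), 71.3)
}
```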
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32..2fd94973 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 00000000..e2959ac7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1c..552263ba 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb28..33a3baf1 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f7..8c8286aa 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d622..6870e316 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 00000000..4f80c898 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,26 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e13..ab09daf9 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go index 12d6b520..09e094de 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
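The corrected Inject doc comment above belongs to the W3C trace context propagator. A brief sketch of the Inject/Extract round trip over HTTP headers, assuming the standard propagation carriers; the URL is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.TraceContext{}

	// Client side: write traceparent/tracestate headers from ctx. With no
	// active span in ctx the SpanContext is invalid and Inject is a no-op.
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
	prop.Inject(context.Background(), propagation.HeaderCarrier(req.Header))

	// Server side: rebuild the remote SpanContext from those headers.
	ctx := prop.Extract(context.Background(), propagation.HeaderCarrier(req.Header))
	_ = ctx

	fmt.Println(prop.Fields()) // [traceparent tracestate]
}
```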
+// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go index 4a711133..1a820bdb 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" @@ -72,7 +61,7 @@ func (c *NetConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -263,7 +252,7 @@ func (c *NetConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -320,5 +309,5 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bit size of 16 checked above. } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 00000000..87b842c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f774..e087c9c0 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. 
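The net.go hunk above drops redundant int conversions and annotates the one that remains with why it is safe: ParseUint was called with a bit size of 16. The same guarded-conversion pattern in isolation, as a hedged standalone helper (not the upstream function):

```go
package main

import (
	"fmt"
	"strconv"
)

// parsePort mirrors the guarded conversion: a 16-bit ParseUint bound means
// p is at most 65535, so int(p) cannot overflow on any platform and the
// gosec overflow warning can be suppressed, as done upstream.
func parsePort(s string) (int, bool) {
	p, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		return 0, false
	}
	return int(p), true
}

func main() {
	fmt.Println(parsePort("8080"))  // 8080 true
	fmt.Println(parsePort("70000")) // 0 false: exceeds 16 bits
}
```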
// diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4..c7b804bb 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559d..137acc67 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c1..d318221e 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md new file mode 100644 index 00000000..18ee2f3d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 HTTP conv + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go index fc43808f..76d1dc86 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3..7e365e82 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d..634a1dce 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299..21497bb6 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 00000000..2de1fc3c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 00000000..d8dc822b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. 
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
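+// A short sketch of the DynamoDB helpers above on a client span wrapping a
+// parallel Scan call; `span` is an assumed trace.Span and the values are
+// illustrative:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBSegment(3),
+//		AWSDynamoDBTotalSegments(100),
+//	)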
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
+func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, each writing to its own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to.
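+// A sketch of how the ECS attributes above might populate an OpenTelemetry
+// resource; it assumes the go.opentelemetry.io/otel/sdk/resource package
+// and this package's SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		AWSECSLaunchtypeFargate,
+//		AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	)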
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (the `Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` endpoint, where applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (the `Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` endpoint, where applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation.
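+// An illustrative sketch of the log and Lambda helpers above on a faas
+// span (`span` is an assumed trace.Span); note the invoked ARN may differ
+// from `cloud.resource_id` when an alias is involved:
+//
+//	span.SetAttributes(
+//		AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//		AWSLogGroupNames("/aws/lambda/my-function"),
+//	)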
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
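+// A minimal sketch of the S3 helpers above on a span around an upload-part
+// call; `span` is an assumed trace.Span and the values are taken from the
+// examples in the docs above:
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)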
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
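+// A sketch of the browser helpers above, assuming the values were collected
+// client-side from navigator.userAgentData and forwarded to the Go backend,
+// where `span` is an assumed trace.Span:
+//
+//	span.SetAttributes(
+//		BrowserBrands("Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	)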
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the + // availability zone the resource is running in. Cloud regions often have + // multiple, isolated locations known as zones to increase availability. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`.
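+// A short sketch of the client helpers above on a server-side span, with
+// `span` an assumed trace.Span observed at the server:
+//
+//	span.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)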
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the +// availability zone the resource is running in. Cloud regions often have +// multiple, isolated locations known as zones to increase availability. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running in. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), + // which uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), + // which identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions.
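+// A hypothetical resource built from the cloud helpers and enum values
+// above; it assumes the go.opentelemetry.io/otel/sdk/resource package and
+// this package's SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		CloudProviderAWS,
+//		CloudPlatformAWSLambda,
+//		CloudRegion("us-east-1"),
+//		CloudAccountID("111111111111"),
+//	)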
It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), + // which contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), +// which uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), +// which identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), +// which contains a value describing the type of event related to the +// originating occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow reporting this unit of code and therefore provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`.
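+// A sketch of the CloudEvents helpers above on a messaging span; `span` is
+// an assumed trace.Span and the event values are illustrative:
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("my-service"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)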
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
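+//
+// For illustration, a minimal sketch of attaching these code-location
+// attributes to a span (assumes this package is imported as semconv and span
+// is a go.opentelemetry.io/otel/trace.Span; the values are illustrative only):
+//
+//	span.SetAttributes(
+//		semconv.CodeFunction("serveRequest"),
+//		semconv.CodeNamespace("com.example.MyHTTPService"),
+//		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		semconv.CodeLineNumber(42),
+//	)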
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
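+	// For illustration, a minimal sketch of describing a container with these
+	// attributes (assumes this package is imported as semconv; the values are
+	// taken from the examples above):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		semconv.ContainerIDKey.String("a3bf90e006b2"),
+	//		semconv.ContainerImageNameKey.String("gcr.io/opentelemetry/operator"),
+	//		semconv.ContainerImageTagsKey.StringSlice([]string{"v1.27.1"}),
+	//	}
+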
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by the
+	// container runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by the container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+	return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+	return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+	return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+	return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+	return DBQueryTextKey.String(val)
+}
+
+// This group defines attributes for Cassandra.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
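+//
+// For illustration, a minimal sketch of annotating a Cassandra client span
+// with attributes from this group (assumes this package is imported as
+// semconv and span is a go.opentelemetry.io/otel/trace.Span; the values are
+// illustrative only):
+//
+//	span.SetAttributes(
+//		semconv.DBSystemCassandra,
+//		semconv.DBCassandraConsistencyLevelLocalQuorum,
+//		semconv.DBCassandraCoordinatorDC("us-west-2"),
+//		semconv.DBCassandraPageSize(5000),
+//	)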
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents
+	// the identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a request was
+	// routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It is deprecated; use the `device.app.lifecycle`
+	// event definition, including `android.state` as a payload field, instead.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// and from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
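+//
+// For illustration, a minimal sketch of setting the enduser attributes on a
+// span (assumes this package is imported as semconv and span is a
+// go.opentelemetry.io/otel/trace.Span; the values are illustrative only):
+//
+//	span.SetAttributes(
+//		semconv.EnduserID("username"),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope("read:message write:files"),
+//	)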
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the operation ended
+	// with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be
+	// used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
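+	//
+	// For illustration, a minimal sketch of attaching the event name to a log
+	// record's attributes (assumes this package is imported as semconv; the
+	// value is taken from the examples above):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		semconv.EventName("device.app.lifecycle"),
+	//	}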
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
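+	// For illustration, a minimal sketch of recording these attributes on a
+	// span event (assumes this package is imported as semconv, err is an error
+	// in scope, and span is a go.opentelemetry.io/otel/trace.Span; the type
+	// value is illustrative only):
+	//
+	//	span.AddEvent("exception", trace.WithAttributes(
+	//		semconv.ExceptionType("*net.OpError"),
+	//		semconv.ExceptionMessage(err.Error()),
+	//		semconv.ExceptionEscaped(true),
+	//	))
+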
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// FaaS attributes
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name, and
+	// in Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes the type of
+ // the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
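+
+ // Example (editor's sketch, not part of the upstream file): faas.time and
+ // faas.document.time expect an ISO 8601 timestamp in UTC; in Go the
+ // standard library's time.RFC3339 layout matches the example above, using
+ // the FaaSDocumentTime helper defined later in this file:
+ //
+ //	attr := semconv.FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339))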
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. SHOULD be a semantic
+ // identifier for a value. If one is unavailable, a stringified version of
+ // the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself.
+ // This can provide additional context for understanding the meaning behind
+ // a value. For example, the variant `red` may be used for the value
+ // `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. SHOULD be a semantic identifier
+// for a value. If one is unavailable, a stringified version of the value can
+// be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
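+
+// Example (editor's sketch, not part of the upstream file): annotating a
+// feature-flag evaluation with the helpers above, reusing the example values
+// from the attribute docs:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.FeatureFlagKey("logo-color"),
+//		semconv.FeatureFlagProviderName("Flag Manager"),
+//		semconv.FeatureFlagVariant("red"),
+//	}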
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
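+ //
+ // Example (editor's sketch, not part of the upstream file): a prompt value
+ // in the recommended JSON shape, passed through the GenAiPrompt helper
+ // defined later in this file:
+ //
+ //	attr := semconv.GenAiPrompt(`[{"role": "user", "content": "What is the capital of France?"}]`)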
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
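+//
+// Example (editor's sketch, not part of the upstream file): annotating an LLM
+// call span with the request and usage helpers in this section, using values
+// taken from the attribute docs:
+//
+//	span.SetAttributes(
+//		semconv.GenAiSystemOpenai,
+//		semconv.GenAiRequestModel("gpt-4"),
+//		semconv.GenAiRequestTemperature(0.0),
+//		semconv.GenAiUsagePromptTokens(100),
+//		semconv.GenAiUsageCompletionTokens(180),
+//	)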
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for Heroku dyno metadata.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
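+//
+// Example (editor's sketch, not part of the upstream file): host attributes
+// are typically attached to a resource rather than to individual spans, e.g.
+// with the go.opentelemetry.io/otel/sdk/resource package:
+//
+//	res, err := resource.New(ctx, resource.WithAttributes(
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostArchAMD64,
+//	))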
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method`
+ // attribute value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case-insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g.
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
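+ //
+ // Example (editor's sketch, not part of the upstream file): typical HTTP
+ // server span attributes built from the keys in this section, using values
+ // taken from the attribute docs:
+ //
+ //	span.SetAttributes(
+ //		semconv.HTTPRequestMethodGet,
+ //		semconv.HTTPRoute("/users/:userID?"),
+ //		semconv.HTTPResponseStatusCode(200),
+ //	)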
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents whether the
+ // thread is a daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the
+// thread is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever added, we will recommend collecting the `k8s.cluster.uid` through
+ // the official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), which
+ // states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
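These Kubernetes identifiers normally land on the telemetry Resource rather than on individual spans. A sketch under the assumption that the OpenTelemetry SDK's resource package (go.opentelemetry.io/otel/sdk/resource) is available alongside this vendored semconv package; the values are placeholders taken from the examples above.

// k8sResource builds a Resource describing the workload this process runs in.
func k8sResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx,
		resource.WithAttributes(
			semconv.K8SClusterName("opentelemetry-cluster"),
			semconv.K8SNamespaceName("default"),
			semconv.K8SPodName("opentelemetry-pod-autoconf"),
			semconv.K8SContainerName("redis"),
			semconv.K8SContainerRestartCount(2),
		),
	)
}
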
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
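A file-tailing collector could attach these origin attributes to each record it forwards. A small sketch using the helpers below (editor's illustration; imports as in the earlier HTTP sketch, plus go.opentelemetry.io/otel/attribute):

// logOriginAttrs describes where a log record was read from. Both the
// symlinked and the resolved locations can be reported together.
func logOriginAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.LogIostreamStdout,
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogFilePathResolved("/var/lib/docker/uuid.log"),
	}
}
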
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions.
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
+ // It represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or the uncompressed body
+ // size. If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or the uncompressed size.
+ // If both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack', 'nack', 'send'
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents the messaging
+ // system as identified by the client instrumentation.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
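Combined, these general messaging attributes describe one operation against one destination. A hedged sketch of a producer-side span, using the helpers defined below (editor's illustration; imports as in the earlier HTTP sketch):

// publish annotates a producer span for a message sent to a Kafka topic.
func publish(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "send MyTopic",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,
			semconv.MessagingOperationTypePublish,
			semconv.MessagingOperationName("send"),
			semconv.MessagingDestinationName("MyTopic"),
		))
	defer span.End()
}
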
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
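Once a payload is serialized, the identity and size helpers can be applied to an existing span. A minimal sketch (editor's illustration; imports as in the earlier sketches):

// recordMessage records per-message identity and sizes. Per the notes above,
// prefer the uncompressed sizes when both are known.
func recordMessage(span trace.Span, msgID string, body, envelope []byte) {
	span.SetAttributes(
		semconv.MessagingMessageID(msgID),
		semconv.MessagingMessageBodySize(len(body)),
		semconv.MessagingMessageEnvelopeSize(len(envelope)),
	)
}
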
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message key in Kafka: message keys are used to group alike messages and
+ // ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka: message keys are used to group alike messages and
+// ensure they're processed on the same partition.
+// They differ from `messaging.message.id` in that they're not unique. If the
+// key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// This group describes attributes specific to RabbitMQ.
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+ // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+ // It represents the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 123
+ MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark the message besides the
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // isolated from each other.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions.
+// It represents the namespace of RocketMQ resources; resources in different
+// namespaces are isolated from each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+ // It represents the ack deadline in seconds set for the modify ack
+ // deadline request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+ // represents the ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack_id'
+ MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+ // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+ // semantic conventions. It represents the delivery attempt for a given
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
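A consumer-side sketch pulling these Pub/Sub helpers together (editor's illustration; imports as in the earlier sketches, and the values are the placeholder examples from the docs above):

// recordPubsubDelivery annotates a span for one received Pub/Sub message.
func recordPubsubDelivery(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingGCPPubsubMessageAckID("ack_id"),
		semconv.MessagingGCPPubsubMessageAckDeadline(10),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(2),
		semconv.MessagingGCPPubsubMessageOrderingKey("ordering_key"),
	)
}
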
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic that messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It describes the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic that messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
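+//
+// For example, a consumer instrumentation might record the attempt count
+// like this (the span variable and the value 2 are illustrative
+// assumptions only):
+//
+//	span.SetAttributes(MessagingServicebusMessageDeliveryCount(2))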
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
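+//
+// A short sketch of how the local address and port helpers are typically
+// combined on a span (the span variable and the concrete values are
+// assumptions for illustration, not mandated by the conventions):
+//
+//	span.SetAttributes(
+//		NetworkLocalAddress("10.1.2.80"),
+//		NetworkLocalPort(65123),
+//	)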
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
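+	//
+	// For instance, an OpenTracing shim translating a ChildOf reference
+	// could attach the predefined enum value (the span variable is an
+	// illustrative assumption):
+	//
+	//	span.SetAttributes(OpentracingRefTypeChildOf)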
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether
+	// the context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether the
+	// process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
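+//
+// For example (the span variable and the UID value are illustrative
+// assumptions):
+//
+//	span.SetAttributes(ProcessRealUserID(1000))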
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It represents the message ID, which MUST be
+	// calculated as two different counters starting from `1`, one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the
+	// "rpc.message.type" semantic conventions. It represents whether this
+	// is a received or sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows id to be an int,
+// a string, `null`, or missing (for notifications), the value is expected to
+// be cast to string for simplicity. Use an empty string in case of a `null`
+// value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in the `jsonrpc` property of request/response. Since JSON-RPC
+// 1.0 doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the message ID, which
+// MUST be calculated as two different counters starting from `1`: one for
+// sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection).
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
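As a sketch of the client-side usage described above (not from the vendored file; serverAttrs is a hypothetical helper and the import path assumes the v1.24.0 semconv package), a dial target can be split into server.address and server.port:

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// serverAttrs splits a "host:port" dial target into the server.address and
// server.port attributes defined above.
func serverAttrs(hostport string) []attribute.KeyValue {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		// No port component; report the address as-is.
		return []attribute.KeyValue{semconv.ServerAddress(hostport)}
	}
	port, _ := strconv.Atoi(portStr)
	return []attribute.KeyValue{
		semconv.ServerAddress(host),
		semconv.ServerPort(port),
	}
}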
Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace, should be treated as confidential;
+ // it is the user's choice whether to expose it
+ // via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+ // creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fall back to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). A zero-length namespace string is assumed equal to an
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
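A minimal sketch of attaching the service.* attributes above to an SDK resource (not from the vendored file; it assumes the standard otel sdk/resource API, the package's SchemaURL constant, and placeholder literal values taken from the examples above):

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newServiceResource builds a resource carrying the service.* attributes;
// all literal values are placeholders.
func newServiceResource() (*resource.Resource, error) {
	return resource.Merge(resource.Default(), resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	))
}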
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
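A sketch of the session rotation described above (not from the vendored file; sessionAttrs is a hypothetical helper), linking a new session to the one it replaces:

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// sessionAttrs links a new session to the one it replaces; prev is empty for
// a first session, in which case session.previous_id is omitted.
func sessionAttrs(id, prev string) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.SessionID(id)}
	if prev != "" {
		attrs = append(attrs, semconv.SessionPreviousID(prev))
	}
	return attrs
}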
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
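A small sketch combining the SignalR enum members above into one attribute set, e.g. for labeling a connection-duration measurement (illustrative only; the pairing of values is a placeholder):

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// signalrCloseAttrs tags a connection that ended normally over WebSockets.
var signalrCloseAttrs = attribute.NewSet(
	semconv.SignalrConnectionStatusNormalClosure,
	semconv.SignalrTransportWebSockets,
)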
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
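As a sketch of how the system.cpu.* attributes above label per-CPU metric data points (illustrative; the specific CPU number and state are placeholders, and the metric name itself is not defined in this file):

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// cpuStateAttrs labels one data point of a per-CPU metric, here CPU 1 in the
// idle state.
var cpuStateAttrs = attribute.NewSet(
	semconv.SystemCPULogicalNumber(1),
	semconv.SystemCPUStateIdle,
)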
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
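A comparable sketch for the filesystem attributes above (illustrative; the device and mountpoint values are placeholders):

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// fsUsageAttrs labels a filesystem-usage data point with the device,
// mountpoint, type, and state attributes defined above.
var fsUsageAttrs = attribute.NewSet(
	semconv.SystemDevice("/dev/sda1"),
	semconv.SystemFilesystemMountpointKey.String("/mnt/data"),
	semconv.SystemFilesystemTypeExt4,
	semconv.SystemFilesystemStateUsed,
)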
It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the network
+ // connection state. A stateless protocol MUST NOT set this attribute.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client.
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the
+ // date/time indicating when the client certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name indication (SNI), which tells the server the hostname to which the
+ // client is attempting to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions.
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. 
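A sketch of the uppercase-hash formatting rule above (not from the vendored file; serverCertSha256 is a hypothetical helper), computing the SHA-256 fingerprint from a certificate's DER bytes:

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// serverCertSha256 produces the uppercase hex fingerprint of the server
// certificate's DER encoding, as the comments above require.
func serverCertSha256(cert *x509.Certificate) attribute.KeyValue {
	sum := sha256.Sum256(cert.Raw)
	return semconv.TLSServerHashSha256(strings.ToUpper(hex.EncodeToString(sum[:])))
}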
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name indication (SNI), which tells the server the hostname to which the
+// client is attempting to connect.
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
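As a sketch of deriving several of the tls.* attributes above from Go's crypto/tls (not from the vendored file; tlsAttrs is a hypothetical helper and the protocol-version mapping is abbreviated to TLS 1.2/1.3):

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// tlsAttrs maps a completed handshake's state onto the tls.* attributes.
func tlsAttrs(cs tls.ConnectionState) []attribute.KeyValue {
	version := "1.2"
	if cs.Version == tls.VersionTLS13 {
		version = "1.3"
	}
	return []attribute.KeyValue{
		semconv.TLSEstablished(cs.HandshakeComplete),
		semconv.TLSResumed(cs.DidResume),
		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
		semconv.TLSProtocolVersion(version),
	}
}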
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host
+ // name under the registered_domain. In a partially qualified domain, or if
+ // the qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, which is the
+ // last part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions.
It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is
+// `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`.
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string.
In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`.
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 00000000..d031bbea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 00000000..bfaee0d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 00000000..fcdb9f48 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It is deprecated;
+ // use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It is deprecated;
+ // use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It is deprecated;
+ // use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It is deprecated;
+ // use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It is
+ // deprecated; use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It is deprecated;
+ // use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It is deprecated;
+ // use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a DNS
+ // lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the function's logic
+ // execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the function's
+ // initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
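+
+ // Example (illustrative sketch, not generated from the specification): the
+ // Name/Unit/Description constant triples defined in this file are meant to
+ // be passed to the OpenTelemetry metric API when declaring instruments.
+ // Assuming a metric.Meter named `meter` (obtained from a MeterProvider)
+ // and go.opentelemetry.io/otel/metric imported as `metric`, a histogram
+ // for "http.server.request.duration" might be declared as:
+ //
+ //	reqDur, err := meter.Float64Histogram(
+ //		HTTPServerRequestDurationName,
+ //		metric.WithUnit(HTTPServerRequestDurationUnit),
+ //		metric.WithDescription(HTTPServerRequestDurationDescription),
+ //	)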
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It measures the
+ // duration of the publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It measures the
+ // duration of the receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It measures the
+ // duration of the process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It measures the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It measures the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It measures the
+ // number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions.
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound RPCs.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It measures the size of
+ // RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It measures the size of
+ // RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound RPCs.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It measures the size of
+ // RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It measures the size of
+ // RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It reports the number of
+ // actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It reports the number of
+ // logical (virtual) processor cores created by the operating system to manage
+ // multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 00000000..4c87c7ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
index caf7249d..6836c654 100644
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 00000000..58ccaba6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 3aadc66c..9c0b720a 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package trace // import "go.opentelemetry.io/otel/trace"
 
@@ -224,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{}
 
 // WithAttributes adds the attributes related to a span life-cycle event.
 // These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
+// option is provided to a Span's start event. Otherwise, these
 // attributes provide additional information about the event being recorded
 // (e.g. error, state change, processing progress, system event).
 //
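The config.go hunk above narrows the WithAttributes documentation to span start events, which matters in practice: samplers only see attributes supplied when a span is created, not ones set later via SetAttributes. A hedged sketch of that usage (not part of the diff; the tracer name and attribute are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context) {
	tracer := otel.Tracer("example")

	// Attributes passed via WithAttributes at Start are visible to head
	// samplers; attributes added afterwards with span.SetAttributes are not.
	ctx, span := tracer.Start(ctx, "handle",
		trace.WithAttributes(attribute.String("peer.service", "backend")),
	)
	defer span.End()

	_ = ctx // ... traced work ...
}
```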
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 76f9a083..8c45a710 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package trace // import "go.opentelemetry.io/otel/trace"
 
@@ -33,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
 	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
 }
 
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
 // as a remote SpanContext and as the current Span. The Span implementation
 // that wraps rsc is non-recording and performs no operations other than to
 // return rsc as the SpanContext from the SpanContext method.
@@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte
 // performs no operations is returned.
 func SpanFromContext(ctx context.Context) Span {
 	if ctx == nil {
-		return noopSpan{}
+		return noopSpanInstance
 	}
 	if span, ok := ctx.Value(currentSpanKey).(Span); ok {
 		return span
 	}
-	return noopSpan{}
+	return noopSpanInstance
 }
 
 // SpanContextFromContext returns the current Span's SpanContext.
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index 440f3d75..cdbf41d6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 /*
 Package trace provides an implementation of the tracing part of the
@@ -107,7 +96,7 @@ can embed the API interface directly.
 
 This option is not recommended. It will lead to publishing packages that
 contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
 dependency.
 
 Finally, an author can embed another implementation in theirs.
The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 00000000..7754a239 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a7..3e359a00 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb816..c00221e7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491c..ca20e999 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 00000000..cd382c82 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 00000000..64a4f1b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry trace API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry trace API will effectively +// disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry trace API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/trace/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ trace.TracerProvider = TracerProvider{} + _ trace.Tracer = Tracer{} + _ trace.Span = Span{} +) + +// TracerProvider is an OpenTelemetry No-Op TracerProvider. +type TracerProvider struct{ embedded.TracerProvider } + +// NewTracerProvider returns a TracerProvider that does not record any telemetry. +func NewTracerProvider() TracerProvider { + return TracerProvider{} +} + +// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. +func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { + return Tracer{} +} + +// Tracer is an OpenTelemetry No-Op Tracer. +type Tracer struct{ embedded.Tracer } + +// Start creates a span. The created span will be set in a child context of ctx +// and returned with the span. +// +// If ctx contains a span context, the returned span will also contain that +// span context. If the span context in ctx is for a non-recording span, that +// span instance will be returned directly. 
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+ span := trace.SpanFromContext(ctx)
+
+ // If the parent context contains a non-zero span context, that span
+ // context needs to be returned as a non-recording span
+ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+ var zeroSC trace.SpanContext
+ if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+ if !span.IsRecording() {
+ // If the span is not recording return it directly.
+ return ctx, span
+ }
+ // Otherwise, wrap the span context in a non-recording span and return it.
+ span = Span{sc: sc}
+ } else {
+ // No parent, return a No-Op span with an empty span context.
+ span = noopSpanInstance
+ }
+ return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+ embedded.Span
+
+ sc trace.SpanContext
+}
+
+// SpanContext returns the span context held by s, which is empty unless a
+// parent's span context was wrapped.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
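The noop package added above is the supported way to switch tracing off entirely while leaving instrumented code unchanged: register it as the global provider and every span started through the API becomes non-recording. A small usage sketch (not part of the diff; the tracer and span names are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// Install the no-op provider globally; all spans started through
	// otel.Tracer are non-recording and cost almost nothing.
	otel.SetTracerProvider(noop.NewTracerProvider())

	tracer := otel.Tracer("example")
	_, span := tracer.Start(context.Background(), "op")
	defer span.End()

	// IsRecording reports false for the no-op span.
	_ = span.IsRecording()
}
```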
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 00000000..ef85cb70
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
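The provider.go documentation above recommends one Tracer per instrumentation package, named after that package and versioned with WithInstrumentationVersion. A sketch of that conventional setup (not part of the diff; the module path and version are illustrative):

```go
package app

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// tracer is scoped to this instrumentation package, as the TracerProvider
// docs recommend; the name and version here are illustrative.
var tracer = otel.Tracer(
	"example.com/app",
	trace.WithInstrumentationVersion("0.1.0"),
)

func do(ctx context.Context) {
	ctx, span := tracer.Start(ctx, "do")
	defer span.End() // every started Span must be ended

	_ = ctx // ... work ...
}
```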
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 00000000..d3aa476e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
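Link and LinkFromContext above cover the batch-processing case: a span can have only one parent, so the other originating traces are attached as links, preferably at span creation (per the AddLink comment) so head samplers can see them. A hedged sketch (not part of the diff; all names are illustrative):

```go
package batch

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// processBatch links the batch-processing span to every item's originating
// trace, since a span can have only one parent.
func processBatch(ctx context.Context, itemCtxs []context.Context) {
	links := make([]trace.Link, 0, len(itemCtxs))
	for _, ic := range itemCtxs {
		links = append(links, trace.LinkFromContext(ic))
	}

	// Supplying links at creation (WithLinks) lets head samplers see them,
	// which AddLink after creation cannot guarantee.
	_, span := otel.Tracer("example.com/batch").Start(ctx, "process-batch",
		trace.WithLinks(links...),
	)
	defer span.End()
}
```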
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of a client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 26a4b226..d49adf67 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -1,28 +1,12 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package trace // import "go.opentelemetry.io/otel/trace"
 
 import (
 "bytes"
- "context"
 "encoding/hex"
 "encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -337,241 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 Remote: sc.remote,
 })
 }
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced.
A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. 
- Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. 
- // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. 
- // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 00000000..77952d2a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5..dc5e34ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -271,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. 
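The Walk method added to TraceState above gives early-exit iteration over list-members without allocating; a small sketch (the vendor keys are made up):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("vendor1=a,vendor2=b")
	if err != nil {
		panic(err)
	}
	// Walk visits members in list order; returning false stops iteration.
	ts.Walk(func(key, value string) bool {
		fmt.Println(key, value)
		return true
	})
}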
If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index dbb61a42..00000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 00000000..1e87855e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! 
-f "$dir/README.md" ]; then
+    echo "couldn't find README.md for $dir"
+    missingReadme=true
+  fi
+done
+
+if [ "$missingReadme" = true ] ; then
+  echo "Error: some READMEs couldn't be found."
+  exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 00000000..c9b7cdbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 7b2993a1..eb22002d 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -1,20 +1,9 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.24.0"
+	return "1.34.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 1b556e67..ce4fe59b 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -1,32 +1,15 @@
 # Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -40,16 +23,19 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.56.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.10.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 56cdc7c2..fef687db 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -80,6 +80,7 @@ type handshakeTransport struct { pendingPackets [][]byte // Used when a key exchange is in progress. writePacketsLeft uint32 writeBytesLeft int64 + userAuthComplete bool // whether the user authentication phase is complete // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -552,16 +553,25 @@ func (t *handshakeTransport) sendKexInit() error { return nil } +var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") + func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + switch p[0] { case msgKexInit: return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: return errors.New("ssh: only handshakeTransport can send newKeys") + case msgUserAuthBanner: + if t.userAuthComplete { + return errSendBannerPhase + } + case msgUserAuthSuccess: + t.userAuthComplete = true } - t.mu.Lock() - defer t.mu.Unlock() if t.writeError != nil { return t.writeError } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 5b5ccd96..1839ddc6 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct { Server GSSAPIServer } +// SendAuthBanner implements [ServerPreAuthConn]. 
+func (s *connection) SendAuthBanner(msg string) error { + return s.transport.writePacket(Marshal(&userAuthBannerMsg{ + Message: msg, + })) +} + +func (*connection) unexportedMethodForFutureProofing() {} + +// ServerPreAuthConn is the interface available on an incoming server +// connection before authentication has completed. +type ServerPreAuthConn interface { + unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB + + ConnMetadata + + // SendAuthBanner sends a banner message to the client. + // It returns an error once the authentication phase has ended. + SendAuthBanner(string) error +} + // ServerConfig holds server specific configuration data. type ServerConfig struct { // Config contains configuration shared between client and server. @@ -118,6 +139,12 @@ type ServerConfig struct { // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) + // PreAuthConnCallback, if non-nil, is called upon receiving a new connection + // before any authentication has started. The provided ServerPreAuthConn + // can be used at any time before authentication is complete, including + // after this callback has returned. + PreAuthConnCallback func(ServerPreAuthConn) + // ServerVersion is the version identification string to announce in // the public handshake. // If empty, a reasonable default is used. @@ -488,6 +515,10 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + if config.PreAuthConnCallback != nil { + config.PreAuthConnCallback(s) + } + sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var displayedBanner bool + var calledBannerCallback bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. @@ -542,14 +573,10 @@ userAuthLoop: s.user = userAuthReq.User - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if !calledBannerCallback && config.BannerCallback != nil { + calledBannerCallback = true + if msg := config.BannerCallback(s); msg != "" { + if err := s.SendAuthBanner(msg); err != nil { return nil, err } } @@ -762,10 +789,7 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - bannerMsg := &userAuthBannerMsg{ - Message: bannerErr.Message, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if err := s.SendAuthBanner(bannerErr.Message); err != nil { return nil, err } } diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. 
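Stepping back to the ssh changes above: PreAuthConnCallback plus ServerPreAuthConn give servers a supported way to send banners before any auth attempt. A minimal configuration sketch (host-key setup and auth callbacks omitted):

package main

import "golang.org/x/crypto/ssh"

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		// Called once per incoming connection, before authentication starts.
		// The ServerPreAuthConn remains usable until the auth phase ends,
		// after which SendAuthBanner returns an error.
		PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
			_ = conn.SendAuthBanner("authorized use only\n")
		},
	}
}

func main() { _ = newServerConfig() }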
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 5e8158bb..46ceac34 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check - return append(s[:i], s[j:]...) + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. 
If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage collected.
+// Compact zeroes the elements between the new length and the original length.
 func Compact[S ~[]E, E comparable](s S) S {
 	if len(s) < 2 {
 		return s
@@ -361,11 +374,13 @@
 			i++
 		}
 	}
+	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
 	return s[:i]
 }
 
 // CompactFunc is like [Compact] but uses an equality function to compare elements.
 // For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
 func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
 	if len(s) < 2 {
 		return s
@@ -379,6 +394,7 @@
 			i++
 		}
 	}
+	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
 	return s[:i]
 }
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index b67897f7..f58bbc7b 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
 // SortFunc sorts the slice x in ascending order as determined by the cmp
 // function. This sort is not guaranteed to be stable.
 // cmp(a, b) should return a negative number when a < b, a positive number when
-// a > b and zero when a == b.
+// a > b and zero when a == b or when a is not comparable to b in the sense
+// of the formal definition of Strict Weak Ordering.
 //
 // SortFunc requires that cmp is a strict weak ordering.
 // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+// To indicate 'uncomparable', return 0 from the function.
 func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
 	n := len(x)
 	pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
index 74f88738..bd635cb8 100644
--- a/vendor/golang.org/x/exp/slog/handler.go
+++ b/vendor/golang.org/x/exp/slog/handler.go
@@ -8,6 +8,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"reflect"
 	"strconv"
 	"sync"
 	"time"
@@ -504,6 +505,23 @@ func (s *handleState) appendString(str string) {
 }
 
 func (s *handleState) appendValue(v Value) {
+	defer func() {
+		if r := recover(); r != nil {
+			// If it panics with a nil pointer, the most likely cases are
+			// an encoding.TextMarshaler or error fails to guard against nil,
+			// in which case "<nil>" seems to be the feasible choice.
+			//
+			// Adapted from the code in fmt/print.go.
+			if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
+				s.appendString("<nil>")
+				return
+			}
+
+			// Otherwise just print the original panic message.
+			s.appendString(fmt.Sprintf("!PANIC: %v", r))
+		}
+	}()
+
 	var err error
 	if s.h.json {
 		err = appendJSONValue(s, v)
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/mod/LICENSE
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
    copyright notice, this list of conditions and the following
    disclaimer in the documentation and/or other materials provided
    with the distribution.
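The practical effect of the clearSlice calls introduced above: shrinking operations now zero the vacated tail of the backing array, so deleted pointers stop pinning their referents. A sketch of the observable behavior with this updated x/exp/slices:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []*int{new(int), new(int), new(int)}
	s = slices.Delete(s, 0, 2)

	// The obsolete tail of the backing array has been zeroed, so the two
	// removed *int values no longer keep their referents alive for the GC.
	tail := s[:cap(s)]
	fmt.Println(len(s), tail[1] == nil, tail[2] == nil) // 1 true true
}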
- * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000..37dc0cfd --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8..ca645d9a 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). 
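The newly vendored ctxhttp package above is a thin wrapper that ties a request's lifetime to a context; typical use (the URL is illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// A nil client means http.DefaultClient; on cancellation or timeout
	// the more useful ctx.Err() is returned instead of the transport error.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}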
func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123..5b516c55 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index c7601c90..6c18ea23 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,11 +34,19 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool - disableExtendedConnectProtocol bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -51,8 +59,8 @@ func init() { logFrameWrites = true logFrameReads = true } - if strings.Contains(e, "http2xconnect=0") { - disableExtendedConnectProtocol = true + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false } } @@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
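The incomparable comment kept above describes a compact Go trick; a self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"unsafe"
)

// incomparable is a zero-width array of a non-comparable type (func()),
// so embedding it first makes the struct non-comparable at compile time
// without growing it.
type incomparable [0]func()

type conn struct {
	_  incomparable
	id int
}

func main() {
	fmt.Println(unsafe.Sizeof(conn{})) // same size as the bare int field
	// var a, b conn
	// _ = a == b // compile error: conn is not comparable
}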
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b55547ae..7434b878 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 090d0e1b..f2c166b6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -375,6 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + closedOnIdle bool // true if conn was closed for idleness seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1299,35 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func actualContentLength(req *http.Request) int64 { - if req.Body == nil || req.Body == http.NoBody { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - func (cc *ClientConn) decrStreamReservations() { cc.mu.Lock() defer cc.mu.Unlock() @@ -1352,7 +1310,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) reqCancel: req.Cancel, isHead: req.Method == "HEAD", reqBody: req.Body, - reqBodyContentLength: actualContentLength(req), + reqBodyContentLength: httpcommon.ActualContentLength(req), trace: httptrace.ContextClientTrace(ctx), peerClosed: make(chan struct{}), abort: make(chan struct{}), @@ -1360,25 +1318,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1409,7 +1349,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) } res.Request = req res.TLS = cc.tlsState - if res.Body == noBody && actualContentLength(req) == 0 { + if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 { // If there isn't a request or response body still being // written, then wait for the stream to be closed before // RoundTrip returns. 
@@ -1492,10 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err - } - // wait for setting frames to be received, a server can change this value later, // but we just wait for the first settings frame var isExtendedConnect bool @@ -1659,20 +1595,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{ + Request: req, + AddGzipHeader: cs.requestedGzip, + PeerMaxHeaderListSize: cc.peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. - endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) @@ -2066,218 +2004,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -func isNormalConnect(req *http.Request) bool { - return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" -} - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if !isNormalConnect(req) { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. 
(We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if !isNormalConnect(req) { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). 
- return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2294,7 +2020,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2434,9 +2160,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. - const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) @@ -2646,7 +2375,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2654,7 +2383,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2778,7 +2507,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -3324,7 +3053,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3508,16 +3237,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func 
traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6ff6bee7..fdb35b94 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 00000000..ed14da5a --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 77% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd2..ad3fbacd 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,8 +1,8 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
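The reason the new httpcommon package carries its own asciiEqualFold rather than using strings.EqualFold is Unicode case folding; a quick standard-library demonstration of the difference:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Unicode-aware folding treats the Kelvin sign U+212A as equal to "k",
	// which is unsafe for protocol elements such as header names.
	fmt.Println(strings.EqualFold("k", "\u212A")) // true
	// The ASCII-only fold above compares byte by byte and reports false here.
}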
-package http2
+package httpcommon
 
 import (
 	"net/http"
@@ -88,7 +88,9 @@ func buildCommonHeaderMaps() {
 	}
 }
 
-func lowerHeader(v string) (lower string, ascii bool) {
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
+func LowerHeader(v string) (lower string, ascii bool) {
 	buildCommonHeaderMapsOnce()
 	if s, ok := commonLowerHeader[v]; ok {
 		return s, true
@@ -96,10 +98,18 @@
-func canonicalHeader(v string) string {
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
 	buildCommonHeaderMapsOnce()
 	if s, ok := commonCanonHeader[v]; ok {
 		return s
 	}
 	return http.CanonicalHeaderKey(v)
 }
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+	buildCommonHeaderMapsOnce()
+	s, ok := commonCanonHeader[v]
+	return s, ok
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 00000000..34391477
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,379 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptrace"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/http/httpguts"
+	"golang.org/x/net/http2/hpack"
+)
+
+var (
+	ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// EncodeHeadersParam is parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+	Request *http.Request
+
+	// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+	// added to the request.
+	AddGzipHeader bool
+
+	// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+	PeerMaxHeaderListSize uint64
+
+	// DefaultUserAgent is the User-Agent header to send when the request
+	// neither contains a User-Agent nor disables it.
+	DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+	HasBody     bool
+	HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, canonicalized header name.
+func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+	req := param.Request
+
+	// Check for invalid connection-level headers.
+	if err := checkConnHeaders(req); err != nil {
+		return res, err
+	}
+
+	if req.URL == nil {
+		return res, errors.New("Request.URL is nil")
+	}
+
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+	host, err := httpguts.PunycodeHostPort(host)
+	if err != nil {
+		return res, err
+	}
+	if !httpguts.ValidHostHeader(host) {
+		return res, errors.New("invalid Host header")
+	}
+
+	// isNormalConnect is true if this is a non-extended CONNECT request.
+ isNormalConnect := false + protocol := req.Header.Get(":protocol") + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + contentLength := ActualContentLength(req) + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. 
+				continue
+			}
+
+			for _, v := range vv {
+				f(k, v)
+			}
+		}
+		if shouldSendReqContentLength(req.Method, contentLength) {
+			f("content-length", strconv.FormatInt(contentLength, 10))
+		}
+		if param.AddGzipHeader {
+			f("accept-encoding", "gzip")
+		}
+		if !didUA {
+			f("user-agent", param.DefaultUserAgent)
+		}
+	}
+
+	// Do a first pass over the headers counting bytes to ensure
+	// we don't exceed param.PeerMaxHeaderListSize. This is done as a
+	// separate pass before encoding the headers to prevent
+	// modifying the hpack state.
+	if param.PeerMaxHeaderListSize > 0 {
+		hlSize := uint64(0)
+		enumerateHeaders(func(name, value string) {
+			hf := hpack.HeaderField{Name: name, Value: value}
+			hlSize += uint64(hf.Size())
+		})
+
+		if hlSize > param.PeerMaxHeaderListSize {
+			return res, ErrRequestHeaderListSize
+		}
+	}
+
+	trace := httptrace.ContextClientTrace(req.Context())
+
+	// Header list size is ok. Write the headers.
+	enumerateHeaders(func(name, value string) {
+		name, ascii := LowerHeader(name)
+		if !ascii {
+			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+			// field names have to be ASCII characters (just as in HTTP/1.x).
+			return
+		}
+
+		headerf(name, value)
+
+		if trace != nil && trace.WroteHeaderField != nil {
+			trace.WroteHeaderField(name, []string{value})
+		}
+	})
+
+	res.HasBody = contentLength != 0
+	res.HasTrailers = trailers != ""
+	return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(req *http.Request, disableCompression bool) bool {
+	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+	if !disableCompression &&
+		req.Header.Get("Accept-Encoding") == "" &&
+		req.Header.Get("Range") == "" &&
+		req.Method != "HEAD" {
+		// Request gzip only, not deflate. Deflate is ambiguous and
+		// not as universally supported anyway.
+		// See: https://zlib.net/zlib_faq.html#faq39
+		//
+		// Note that we don't request this for HEAD requests,
+		// due to a bug in nginx:
+		//   http://trac.nginx.org/nginx/ticket/358
+		//   https://golang.org/issue/5522
+		//
+		// We don't request gzip if the request is for a range, since
+		// auto-decoding a portion of a gzipped document will just fail
+		// anyway. See https://golang.org/issue/8923
+		return true
+	}
+	return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
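+// Likewise, a lone "Connection: close" or "Connection: keep-alive" is accepted
+// by this check but dropped during encoding, since HTTP/2 and HTTP/3 have no
+// connection-specific header fields.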
+func checkConnHeaders(req *http.Request) error {
+	if v := req.Header.Get("Upgrade"); v != "" {
+		return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
+	}
+	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+		return fmt.Errorf("invalid Connection request header: %q", vv)
+	}
+	return nil
+}
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+	keys := make([]string, 0, len(req.Trailer))
+	for k := range req.Trailer {
+		k = CanonicalHeader(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", fmt.Errorf("invalid Trailer key %q", k)
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
+
+// ActualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func ActualContentLength(req *http.Request) int64 {
+	if req.Body == nil || req.Body == http.NoBody {
+		return 0
+	}
+	if req.ContentLength != 0 {
+		return req.ContentLength
+	}
+	return -1
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//   - a non-empty string starting with '/'
+//   - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs http.Header) string {
+	for k, vv := range hdrs {
+		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+			return fmt.Sprintf("name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				// Don't include the value in the error,
+				// because it may be sensitive.
+				return fmt.Sprintf("value for header %q", k)
+			}
+		}
+	}
+	return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+	if contentLength > 0 {
+		return true
+	}
+	if contentLength < 0 {
+		return false
+	}
+	// For zero bodies, whether we send a content-length depends on the method.
+	// It also kinda doesn't matter for http2 either way, with END_STREAM.
+	switch method {
+	case "POST", "PUT", "PATCH":
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/oauth2/LICENSE
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2..48dbb9d8 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 90a2c3d6..74f052aa 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is @@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..109997d7 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
raw interface{} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee6..b8322598 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b..9c105f23 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a6807..1e642f33 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. + if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdf..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. 
-// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) 
- fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b4..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 00000000..37a82528 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 00000000..1200487f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f..be8c0020 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af0..abc39554 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
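+// The accessor methods below pin the wrapper with runtime.KeepAlive so the
+// finalizer (and therefore ucred_free) cannot run while the underlying C
+// allocation is still being read.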
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3..4f432bfe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + 
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e32..75207613 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f..c68acda5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4..a8c607ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f3..18563dd8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573..22912cda 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e06..29344eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25b..20d51fb9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66..321b6090 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c0441..9bacdf1e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737c..c2242726 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467..6270c8ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc6..9966c194 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c5..848e5fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f04009..669b2adb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a3133..4834e575 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87fe..c6545413 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname 
procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820..c79aaff3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 
SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf4..5eb45069 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3..05e50297 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe..38c53ec5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017d..31d2e71a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1e..f4184a33 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5..05b99622 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60..43a256e9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 
60658d6a..eea5ddfc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7..0d777bfb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d..b4463650 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416d..0c7d21c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e76..84053916 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825..fcf1b790 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77..52d15b5f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148d..a46abe64 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + 
NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6..3ca814f5 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..ec5f0cdd 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. 
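+// The token bucket starts full: a freshly constructed Limiter permits an
+// immediate burst of up to b events before the rate limit takes effect.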
func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) @@ -416,8 +405,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. + if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 00000000..8b462f3d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,119 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/annotations.proto + +package annotations + +import ( + reflect "reflect" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See `HttpRule`. + // + // optional google.api.HttpRule http = 72295728; + E_Http = &file_google_api_annotations_proto_extTypes[0] +) + +var File_google_api_annotations_proto protoreflect.FileDescriptor + +var file_google_api_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_api_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*HttpRule)(nil), // 1: google.api.HttpRule +} +var file_google_api_annotations_proto_depIdxs = []int32{ + 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is 
the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + file_google_api_annotations_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 00000000..4a9fce53 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,2069 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/client.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 + // Shopping Org. + ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 + // Geo Org. 
+ ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 + // Generative AI - https://developers.generativeai.google + ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 +) + +// Enum value maps for ClientLibraryOrganization. +var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + 5: "SHOPPING", + 6: "GEO", + 7: "GENERATIVE_AI", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + "SHOPPING": 5, + "GEO": 6, + "GENERATIVE_AI": 7, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. 
+type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + // + // Deprecated: Do not use. + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Do not use. +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. 
+ JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. + DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. 
+func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a *public* URI where users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. 
+ DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` + // Optional link to proto reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` + // Optional link to REST reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rest + RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +func (x *Publishing) GetProtoReferenceDocumentationUri() string { + if x != nil { + return x.ProtoReferenceDocumentationUri + } + return "" +} + +func (x *Publishing) GetRestReferenceDocumentationUri() string { + if x != nil { + return x.RestReferenceDocumentationUri + } + return "" +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the "language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // protobuf package), and values are the class names to use. This should + // be used **only** by APIs who have already set the + // "language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. +func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map from original service names to renamed versions. + // This is used when the default generated types + // would cause a naming conflict. (Neither name is + // fully-qualified.) + // Example: Subscriber to SubscriberServiceApi. + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map from full resource types to the effective short name + // for the resource. This is used when otherwise resource + // names from different services would cause naming collisions. + // Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of full resource types to ignore during generation. + // This is typically used for API-specific Location resources, + // which should be handled by the generator as if they were actually + // the common Location resources. + // Example entry: "documentai.googleapis.com/Location" + IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"` + // Namespaces which must be aliased in snippets due to + // a known (but non-generator-predictable) naming collision. + ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"` + // Method signatures (in the form "service.method(signature)") + // which are provided separately, so shouldn't be generated. + // Snippets *calling* these methods are still generated, however.
+ HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *DotnetSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + +func (x *DotnetSettings) GetRenamedResources() map[string]string { + if x != nil { + return x.RenamedResources + } + return nil +} + +func (x *DotnetSettings) GetIgnoredResources() []string { + if x != nil { + return x.IgnoredResources + } + return nil +} + +func (x *DotnetSettings) GetForcedNamespaceAliases() []string { + if x != nil { + return x.ForcedNamespaceAliases + } + return nil +} + +func (x *DotnetSettings) GetHandwrittenSignatures() []string { + if x != nil { + return x.HandwrittenSignatures + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. 
Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. 
+ // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. 
+type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. +// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. 
+ MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. +func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 525000001, + Name: "google.api.api_version", + Tag: "bytes,525000001,opt,name=api_version", + Filename: "google/api/client.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. 
+ // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // - Adding this annotation to an unannotated method is backwards + // compatible. + // - Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // - Modifying or removing an existing method signature annotation is + // a breaking change. + // - Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. 
+ // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + // + // optional string api_version = 525000001; + E_ApiVersion = &file_google_api_client_proto_extTypes[3] +) + +var File_google_api_client_proto protoreflect.FileDescriptor + +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, + 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 
0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 
0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 
0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 
0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, + 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 
0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34, + 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, + 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, + 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 
0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, + 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = 
file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData +} + +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_google_api_client_proto_goTypes = []interface{}{ + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> 
google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 2, + NumMessages: 19, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = 
out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 00000000..08505ba3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,266 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_behavior.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + FieldBehavior_IMMUTABLE FieldBehavior = 5 + // Denotes that a (repeated) field is an unordered list. 
+ // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 +) + +// Enum value maps for FieldBehavior. +var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, + } +) + +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p +} + +func (x FieldBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] +} + +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. 
+ // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, + 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, + 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData +} + +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 00000000..a462e7d0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,392 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters with zeros compressed, following + // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example, + // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. 
This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. 
+func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 
0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 00000000..ffb5838c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,774 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/http.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
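+	//
+	// A minimal sketch of how these fields compose (the selector and path are
+	// illustrative values, not part of the generated API):
+	//
+	//	cfg := &Http{
+	//		Rules: []*HttpRule{{
+	//			Selector: "example.v1.Messaging.GetMessage",
+	//			Pattern:  &HttpRule_Get{Get: "/v1/messages/{message_id}"},
+	//		}},
+	//		FullyDecodeReservedExpansion: false, // keep RFC 6570 reserved characters encoded
+	//	}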
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` +} + +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} + +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules + } + return nil +} + +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion + } + return false +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. 
+// int64 revision = 2; // Mapped to URL query parameter `revision`.
+// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
+// }
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type, a repeated primitive type, or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// patch: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by the
+// proto3 JSON encoding:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// patch: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+// This enables the following two alternative HTTP JSON to RPC mappings:
+//
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(message_id: "123456")`
+//
+// - HTTP: `GET /v1/users/me/messages/123456`
+// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// 1. Leaf request fields (recursive expansion of nested messages in the request
+// message) are classified into three categories:
+// - Fields referred by the path template. They are passed via the URL path.
+// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They
+// are passed via the HTTP request body.
+// - All other fields are passed via the URL query parameters, and the
+// parameter name is the field path in the request message. A repeated
+// field can be represented as multiple query parameters under the same
+// name.
+// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there are no URL
+// query parameters; all fields are passed via the URL path and the HTTP
+// request body.
+// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP
+// request body; all fields are passed via the URL path and URL query
+// parameters.
+//
+// # Path template syntax
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single URL path segment. The syntax `**` matches
+// zero or more URL path segments, which must be the last part of the URL path
+// except the `Verb`.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
+// contains any reserved character, such characters should be percent-encoded
+// before the matching.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path on the client
+// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
+// server side does the reverse decoding. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{var}`.
+//
+// If a variable contains multiple path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path on the
+// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
+// The server side does the reverse decoding, except "%2F" and "%2f" are left
+// unchanged. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{+var}`.
+//
+// # Using gRPC API Service Configuration
+//
+// gRPC API Service Configuration (service config) is a configuration language
+// for configuring a gRPC service to become a user-facing product. The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// The following example selects a gRPC method and applies an `HttpRule` to it:
+//
+//	http:
+//	  rules:
+//	    - selector: example.v1.Messaging.GetMessage
+//	      get: /v1/messages/{message_id}/{sub.subfield}
+//
+// # Special notes
+//
+// When gRPC Transcoding is used to map gRPC to JSON REST endpoints, the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+type HttpRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Selects a method to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax
+	// details.
+	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+	// Determines the URL pattern that is matched by this rule. This pattern can
+	// be used with any of the {get|put|post|delete|patch} methods. A custom
+	// method can be defined using the 'custom' field.
+	//
+	// Types that are assignable to Pattern:
+	//
+	//	*HttpRule_Get
+	//	*HttpRule_Put
+	//	*HttpRule_Post
+	//	*HttpRule_Delete
+	//	*HttpRule_Patch
+	//	*HttpRule_Custom
+	Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
+	// The name of the request field whose value is mapped to the HTTP request
+	// body, or `*` for mapping all request fields not captured by the path
+	// pattern to the HTTP body, or omitted to indicate that there is no HTTP
+	// request body.
+	//
+	// NOTE: the referred field must be present at the top-level of the request
+	// message type.
+	Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
+	// Optional. The name of the response field whose value is mapped to the HTTP
+	// response body. When omitted, the entire response message will be used
+	// as the HTTP response body.
+	//
+	// NOTE: The referred field must be present at the top-level of the response
+	// message type.
+	ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
+	// Additional HTTP bindings for the selector. Nested bindings must
+	// not contain an `additional_bindings` field themselves (that is,
+	// the nesting may only be one level deep).
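+	//
+	// A minimal sketch of one level of nesting (selector and paths are
+	// illustrative values); the nested binding carries its own pattern but
+	// no further `additional_bindings`:
+	//
+	//	rule := &HttpRule{
+	//		Selector: "example.v1.Messaging.GetMessage",
+	//		Pattern:  &HttpRule_Get{Get: "/v1/messages/{message_id}"},
+	//		AdditionalBindings: []*HttpRule{{
+	//			Pattern: &HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"},
+	//		}},
+	//	}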
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` +} + +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRule) ProtoMessage() {} + +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. +func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody + } + return "" +} + +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings + } + return nil +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. 
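+	//
+	// A minimal sketch of selecting this oneof variant (the verb and path are
+	// illustrative values):
+	//
+	//	rule := &HttpRule{
+	//		Selector: "example.v1.Messaging.AckMessage",
+	//		Pattern: &HttpRule_Custom{
+	//			Custom: &CustomHttpPattern{Kind: "ACK", Path: "/v1/messages/{message_id}:ack"},
+	//		},
+	//	}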
+	Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+}
+
+func (*HttpRule_Get) isHttpRule_Pattern() {}
+
+func (*HttpRule_Put) isHttpRule_Pattern() {}
+
+func (*HttpRule_Post) isHttpRule_Pattern() {}
+
+func (*HttpRule_Delete) isHttpRule_Pattern() {}
+
+func (*HttpRule_Patch) isHttpRule_Pattern() {}
+
+func (*HttpRule_Custom) isHttpRule_Pattern() {}
+
+// A custom pattern is used for defining a custom HTTP verb.
+type CustomHttpPattern struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The name of this custom HTTP verb.
+	Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+	// The path matched by this custom verb.
+	Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *CustomHttpPattern) Reset() {
+	*x = CustomHttpPattern{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_http_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CustomHttpPattern) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomHttpPattern) ProtoMessage() {}
+
+func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_http_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
+	return file_google_api_http_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CustomHttpPattern) GetKind() string {
+	if x != nil {
+		return x.Kind
+	}
+	return ""
+}
+
+func (x *CustomHttpPattern) GetPath() string {
+	if x != nil {
+		return x.Path
+	}
+	return ""
+}
+
+var File_google_api_http_proto protoreflect.FileDescriptor
+
+var file_google_api_http_proto_rawDesc = []byte{
+	0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
+	0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
+	0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
+	0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+	0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
+	0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
+	0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
+	0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> 
google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go new file mode 100644 index 00000000..b5db279a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -0,0 +1,660 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/resource.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A description of the historical or future-looking state of the +// resource pattern. +type ResourceDescriptor_History int32 + +const ( + // The "unset" value. + ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 +) + +// Enum value maps for ResourceDescriptor_History. +var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) + +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p +} + +func (x ResourceDescriptor_History) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} +} + +// A flag representing a specific style that a resource claims to conform to. +type ResourceDescriptor_Style int32 + +const ( + // The unspecified value. Do not use. + ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 +) + +// Enum value maps for ResourceDescriptor_Style. 
+var ( + ResourceDescriptor_Style_name = map[int32]string{ + 0: "STYLE_UNSPECIFIED", + 1: "DECLARATIVE_FRIENDLY", + } + ResourceDescriptor_Style_value = map[string]int32{ + "STYLE_UNSPECIFIED": 0, + "DECLARATIVE_FRIENDLY": 1, + } +) + +func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { + p := new(ResourceDescriptor_Style) + *p = x + return p +} + +func (x ResourceDescriptor_Style) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[1].Descriptor() +} + +func (ResourceDescriptor_Style) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[1] +} + +func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. +func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} +} + +// A simple descriptor of a resource type. +// +// ResourceDescriptor annotates a resource message (either by means of a +// protobuf annotation or use in the service config), and associates the +// resource's schema, the resource type, and the pattern of the resource name. +// +// Example: +// +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. The relative resource name pattern associated with this resource + // type. 
The DNS prefix of the full resource name shouldn't be specified here.
+	//
+	// The path pattern must follow the syntax below, which aligns with the
+	// HTTP binding syntax:
+	//
+	// Template = Segment { "/" Segment } ;
+	// Segment = LITERAL | Variable ;
+	// Variable = "{" LITERAL "}" ;
+	//
+	// Examples:
+	//
+	// - "projects/{project}/topics/{topic}"
+	// - "projects/{project}/knowledgeBases/{knowledge_base}"
+	//
+	// The components in braces correspond to the IDs for each resource in the
+	// hierarchy. It is expected that, if multiple patterns are provided,
+	// the same component name (e.g. "project") refers to IDs of the same
+	// type of resource.
+	Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"`
+	// Optional. The field on the resource that designates the resource name
+	// field. If omitted, this is assumed to be "name".
+	NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"`
+	// Optional. The historical or future-looking state of the resource pattern.
+	//
+	// Example:
+	//
+	// // The InspectTemplate message originally only supported resource
+	// // names with organization, and project was added later.
+	// message InspectTemplate {
+	// option (google.api.resource) = {
+	// type: "dlp.googleapis.com/InspectTemplate"
+	// pattern:
+	// "organizations/{organization}/inspectTemplates/{inspect_template}"
+	// pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+	// history: ORIGINALLY_SINGLE_PATTERN
+	// };
+	// }
+	History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
+	// The plural name used in the resource name and permission names, such as
+	// 'projects' for the resource name of 'projects/{project}' and the permission
+	// name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
+	// to this is for Nested Collections that have stuttering names, as defined
+	// in [AIP-122](https://google.aip.dev/122#nested-collections), where the
+	// collection ID in the resource name pattern does not necessarily directly
+	// match the `plural` value.
+	//
+	// It is the same concept as the `plural` field in the k8s CRD spec
+	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+	//
+	// Note: The plural form is required even for singleton resources. See
+	// https://aip.dev/156
+	Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"`
+	// The same concept as the `singular` field in the k8s CRD spec
+	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+	// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+	Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
+	// Style flag(s) for this resource.
+	// These indicate that a resource is expected to conform to a given
+	// style. See the specific style flags for additional information.
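+	//
+	// A minimal sketch combining the fields documented above (the type,
+	// pattern, and names are illustrative values):
+	//
+	//	desc := &ResourceDescriptor{
+	//		Type:     "example.googleapis.com/Book",
+	//		Pattern:  []string{"shelves/{shelf}/books/{book}"},
+	//		Plural:   "books",
+	//		Singular: "book",
+	//		Style:    []ResourceDescriptor_Style{ResourceDescriptor_DECLARATIVE_FRIENDLY},
+	//	}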
+ Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField + } + return "" +} + +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History + } + return ResourceDescriptor_HISTORY_UNSPECIFIED +} + +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { + if x != nil { + return x.Style + } + return nil +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. 
+	//
+	// Example:
+	//
+	// message ListLogEntriesRequest {
+	// string parent = 1 [(google.api.resource_reference) = {
+	// child_type: "logging.googleapis.com/LogEntry"
+	// }];
+	// }
+	ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
+}
+
+func (x *ResourceReference) Reset() {
+	*x = ResourceReference{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_resource_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceReference) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceReference) ProtoMessage() {}
+
+func (x *ResourceReference) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_resource_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
+func (*ResourceReference) Descriptor() ([]byte, []int) {
+	return file_google_api_resource_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceReference) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *ResourceReference) GetChildType() string {
+	if x != nil {
+		return x.ChildType
+	}
+	return ""
+}
+
+var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*ResourceReference)(nil),
+		Field:         1055,
+		Name:          "google.api.resource_reference",
+		Tag:           "bytes,1055,opt,name=resource_reference",
+		Filename:      "google/api/resource.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: ([]*ResourceDescriptor)(nil),
+		Field:         1053,
+		Name:          "google.api.resource_definition",
+		Tag:           "bytes,1053,rep,name=resource_definition",
+		Filename:      "google/api/resource.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*ResourceDescriptor)(nil),
+		Field:         1053,
+		Name:          "google.api.resource",
+		Tag:           "bytes,1053,opt,name=resource",
+		Filename:      "google/api/resource.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// An annotation that describes a resource reference; see
+	// [ResourceReference][].
+	//
+	// optional google.api.ResourceReference resource_reference = 1055;
+	E_ResourceReference = &file_google_api_resource_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// An annotation that describes a resource definition without a corresponding
+	// message; see [ResourceDescriptor][].
+	//
+	// repeated google.api.ResourceDescriptor resource_definition = 1053;
+	E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// An annotation that describes a resource definition; see
+	// [ResourceDescriptor][].
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, + 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, + 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, + 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, + 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, + 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, + 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, + 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData +} + +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = 
[]interface{}{ + (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History + (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style + (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 3: google.api.ResourceReference + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style + 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions + 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference + 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 5, // [5:8] is the sub-list for extension type_name + 2, // [2:5] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go new file mode 100644 index 00000000..1d8397b0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -0,0 +1,693 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+//	protoc-gen-go v1.26.0
+//	protoc        v4.24.4
+// source: google/api/routing.proto
+
+package annotations
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies the routing information that should be sent along with the request
+// in the form of routing header.
+// **NOTE:** All service configuration rules follow the "last one wins" order.
+//
+// The examples below will apply to an RPC which has the following request type:
+//
+// Message Definition:
+//
+//	message Request {
+//	  // The name of the Table
+//	  // Values can be of the following formats:
+//	  // - `projects/<project>/tables/<table>`
+//	  // - `projects/<project>/instances/<instance>/tables/<table>`
+//	  // - `region/<region>/zones/<zone>/tables/<table>`
+//	  string table_name = 1;
+//
+//	  // This value specifies routing for replication.
+//	  // It can be in the following formats:
+//	  // - `profiles/<profile_id>`
+//	  // - a legacy `profile_id` that can be any string
+//	  string app_profile_id = 2;
+//	}
+//
+// Example message:
+//
+//	{
+//	  table_name: projects/proj_foo/instances/instance_bar/table/table_baz,
+//	  app_profile_id: profiles/prof_qux
+//	}
+//
+// The routing header consists of one or multiple key-value pairs. Every key
+// and value must be percent-encoded, and joined together in the format of
+// `key1=value1&key2=value2`.
+// In the examples below I am skipping the percent-encoding for readability.
+//
+// # Example 1
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key equal to the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: app_profile_id=profiles/prof_qux
+//
+// # Example 2
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key different from the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`, but name it `routing_id` in the header.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 3
+//
+// Extracting a field from the request to put into the routing
+// header, while matching a path template syntax on the field's value.
+//
+// NB: it is more useful to send nothing than to send garbage for the purpose
+// of dynamic routing, since garbage pollutes cache. Thus the matching.
+//
+// # Sub-example 3a
+//
+// The field matches the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with project-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Sub-example 3b
+//
+// The field does not match the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with region-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	<no routing header will be sent>
+//
+// # Sub-example 3c
+//
+// Multiple alternative conflictingly named path templates are
+// specified. The one that matches is used to construct the header.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed, whether
+//	  // using the region- or projects-based syntax.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Example 4
+//
+// Extracting a single routing header key-value pair by matching a
+// template syntax on (a part of) a single request field.
+// +// annotation: +// +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=projects/proj_foo +// +// # Example 5 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on (parts of) a single request +// field. The last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar +// +// # Example 6 +// +// Extracting multiple routing header key-value pairs by matching +// several non-conflicting path templates on (parts of) a single request field. +// +// # Sub-example 6a +// +// Make the templates strict, so that if the `table_name` does not +// have an instance information, nothing is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Sub-example 6b +// +// Make the templates loose, so that if the `table_name` does not +// have an instance information, just the project id part is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result (is the same as 6a for our example message because it has the instance +// information): +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Example 7 +// +// Extracting multiple routing header key-value pairs by matching +// several path templates on multiple request fields. +// +// NB: note that here there is no way to specify sending nothing if one of the +// fields does not match its template. E.g. if the `table_name` is in the wrong +// format, the `project_id` will not be sent, but the `routing_id` will be. +// The backend routing code has to be aware of that and be prepared to not +// receive a full complement of keys if it expects multiple. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. 
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{project_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	project_id=projects/proj_foo&routing_id=profiles/prof_qux
+//
+// # Example 8
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on several request fields. The
+// last template to match "wins" the conflict.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The `routing_id` can be a project id or a region id depending on
+//	  // the table name format, but only if the `app_profile_id` is not set.
+//	  // If `app_profile_id` is set it should be used instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=regions/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 9
+//
+// Bringing it all together.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // For routing both `table_location` and a `routing_id` are needed.
+//	  //
+//	  // table_location can be either an instance id or a region+zone id.
+//	  //
+//	  // For `routing_id`, take the value of `app_profile_id`
+//	  // - If it's in the format `profiles/<profile_id>`, send
+//	  // just the `<profile_id>` part.
+//	  // - If it's any other literal, send it as is.
+//	  // If the `app_profile_id` is empty, and the `table_name` starts with
+//	  // the project_id, send that instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "projects/*/{table_location=instances/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_location=regions/*/zones/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "profiles/{routing_id=*}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_location=instances/instance_bar&routing_id=prof_qux
+type RoutingRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A collection of Routing Parameter specifications.
+	// **NOTE:** If multiple Routing Parameters describe the same key
+	// (via the `path_template` field or via the `field` field when
+	// `path_template` is not provided), "last one wins" rule
+	// determines which Parameter gets used.
+	// See the examples for more details.
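+	//
+	// A minimal usage sketch (illustrative only; it assumes `methodDesc` is
+	// a protoreflect.MethodDescriptor whose options actually carry the
+	// routing extension, and that google.golang.org/protobuf/proto is
+	// imported):
+	//
+	//	opts := methodDesc.Options().(*descriptorpb.MethodOptions)
+	//	if proto.HasExtension(opts, E_Routing) {
+	//	    rule := proto.GetExtension(opts, E_Routing).(*RoutingRule)
+	//	    for _, p := range rule.GetRoutingParameters() {
+	//	        _ = p.GetField()        // request field to extract from
+	//	        _ = p.GetPathTemplate() // optional template to match against
+	//	    }
+	//	}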
+ RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"` +} + +func (x *RoutingRule) Reset() { + *x = RoutingRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingRule) ProtoMessage() {} + +func (x *RoutingRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead. +func (*RoutingRule) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter { + if x != nil { + return x.RoutingParameters + } + return nil +} + +// A projection from an input message to the GRPC or REST header. +type RoutingParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A request field to extract the header key-value pair from. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A pattern matching the key-value field. Optional. + // If not specified, the whole field specified in the `field` field will be + // taken as value, and its name used as key. If specified, it MUST contain + // exactly one named segment (along with any number of unnamed segments) The + // pattern will be matched over the field specified in the `field` field, then + // if the match is successful: + // - the name of the single named segment will be used as a header name, + // - the match value of the segment will be used as a header value; + // if the match is NOT successful, nothing will be sent. + // + // Example: + // + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. + // + // When looking at this specific example, we can see that: + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. + // + // **NB:** If the `path_template` field is not provided, the key name is + // equal to the field name, and the whole field should be sent as a value. 
+ // This makes the pattern for the field and the value functionally equivalent + // to `**`, and the configuration + // + // { + // field: "table_name" + // } + // + // is a functionally equivalent shorthand to: + // + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } + // + // See Example 1 for more details. + PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` +} + +func (x *RoutingParameter) Reset() { + *x = RoutingParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingParameter) ProtoMessage() {} + +func (x *RoutingParameter) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead. +func (*RoutingParameter) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{1} +} + +func (x *RoutingParameter) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *RoutingParameter) GetPathTemplate() string { + if x != nil { + return x.PathTemplate + } + return "" +} + +var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*RoutingRule)(nil), + Field: 72295729, + Name: "google.api.routing", + Tag: "bytes,72295729,opt,name=routing", + Filename: "google/api/routing.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See RoutingRule. 
+ // + // optional google.api.RoutingRule routing = 72295729; + E_Routing = &file_google_api_routing_proto_extTypes[0] +) + +var File_google_api_routing_proto protoreflect.FileDescriptor + +var file_google_api_routing_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, + 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_routing_proto_rawDescOnce sync.Once + file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc +) + +func file_google_api_routing_proto_rawDescGZIP() []byte { + file_google_api_routing_proto_rawDescOnce.Do(func() { + file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) + }) + return file_google_api_routing_proto_rawDescData +} + +var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_routing_proto_goTypes = []interface{}{ + 
(*RoutingRule)(nil), // 0: google.api.RoutingRule + (*RoutingParameter)(nil), // 1: google.api.RoutingParameter + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_google_api_routing_proto_depIdxs = []int32{ + 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter + 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions + 0, // 2: google.api.routing:type_name -> google.api.RoutingRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_routing_proto_init() } +func file_google_api_routing_proto_init() { + if File_google_api_routing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_routing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_routing_proto_goTypes, + DependencyIndexes: file_google_api_routing_proto_depIdxs, + MessageInfos: file_google_api_routing_proto_msgTypes, + ExtensionInfos: file_google_api_routing_proto_extTypes, + }.Build() + File_google_api_routing_proto = out.File + file_google_api_routing_proto_rawDesc = nil + file_google_api_routing_proto_goTypes = nil + file_google_api_routing_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 00000000..498020e3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. 
+var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000..737d6876 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,680 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. 
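+	//
+	// A minimal usage sketch for these options (illustrative only; `data`
+	// and `msg` stand in for caller-provided JSON bytes and a generated
+	// message, and are not names defined in this package):
+	//
+	//	opts := protojson.UnmarshalOptions{
+	//	    DiscardUnknown: true,
+	//	    RecursionLimit: 1000,
+	//	}
+	//	if err := opts.Unmarshal(data, msg); err != nil {
+	//	    // handle err
+	//	}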
+ RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. 
+ var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000..ae71007c --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000..0e72d853 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,380 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func Format(m proto.Message) string {
+ return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given [proto.Message] in JSON format using default options.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func Marshal(m proto.Message) ([]byte, error) {
+ return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable JSON format marshaler.
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Multiline specifies whether the marshaler should format the output in
+ // indented-form with every textual element on a new line.
+ // If Indent is an empty string, then an arbitrary indent is chosen.
+ Multiline bool
+
+ // Indent specifies the set of indentation characters to use in a multiline
+ // formatted output such that every entry is preceded by Indent and
+ // terminated by a newline. If non-empty, then Multiline is treated as true.
+ // Indent can only be composed of space or tab characters.
+ Indent string
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
+ // field names.
+ UseProtoNames bool
+
+ // UseEnumNumbers emits enum values as numbers.
+ UseEnumNumbers bool
+
+ // EmitUnpopulated specifies whether to emit unpopulated fields. It does not
+ // emit unpopulated oneof fields or unpopulated extension fields.
+ // The JSON values emitted for unpopulated fields are as follows:
+ // ╔═══════╤════════════════════════════╗
+ // ║ JSON │ Protobuf field ║
+ // ╠═══════╪════════════════════════════╣
+ // ║ false │ proto3 boolean fields ║
+ // ║ 0 │ proto3 numeric fields ║
+ // ║ "" │ proto3 string/bytes fields ║
+ // ║ null │ proto2 scalar fields ║
+ // ║ null │ message fields ║
+ // ║ [] │ list fields ║
+ // ║ {} │ map fields ║
+ // ╚═══════╧════════════════════════════╝
+ EmitUnpopulated bool
+
+ // EmitDefaultValues specifies whether to emit default-valued primitive fields,
+ // empty lists, and empty maps. The fields affected are as follows:
+ // ╔═══════╤════════════════════════════════════════╗
+ // ║ JSON │ Protobuf field ║
+ // ╠═══════╪════════════════════════════════════════╣
+ // ║ false │ non-optional scalar boolean fields ║
+ // ║ 0 │ non-optional scalar numeric fields ║
+ // ║ "" │ non-optional scalar string/byte fields ║
+ // ║ [] │ empty repeated fields ║
+ // ║ {} │ empty map fields ║
+ // ╚═══════╧════════════════════════════════════════╝
+ //
+ // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+ // i.e. presence-sensing fields that are omitted will remain omitted to preserve
+ // presence-sensing.
+ // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+ // a strict superset of the latter.
+ EmitDefaultValues bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Format(m proto.Message) string {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return "" // invalid syntax, but okay since this is for debugging
+ }
+ o.AllowPartial = true
+ b, _ := o.Marshal(m)
+ return string(b)
+}
+
+// Marshal marshals the given [proto.Message] in the JSON format using options in
+// the MarshalOptions object.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ return o.marshal(nil, m)
+}
+
+// MarshalAppend appends the JSON format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
+ return o.marshal(b, m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
+ if o.Multiline && o.Indent == "" {
+ o.Indent = defaultIndent
+ }
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ internalEnc, err := json.NewEncoder(b, o.Indent)
+ if err != nil {
+ return nil, err
+ }
+
+ // Treat nil message interface as an empty message,
+ // in which case the output is an empty JSON object.
+ if m == nil {
+ return append(b, '{', '}'), nil
+ }
+
+ enc := encoder{internalEnc, o}
+ if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+ return nil, err
+ }
+ if o.AllowPartial {
+ return enc.Bytes(), nil
+ }
+ return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+ *json.Encoder
+ opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+ var fd filedesc.Field
+ fd.L0.FullName = "@type"
+ fd.L0.Index = -1
+ fd.L1.Cardinality = protoreflect.Optional
+ fd.L1.Kind = protoreflect.StringKind
+ return &fd
+}()
+
+// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+ order.FieldRanger
+ typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+ return
+ }
+ m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct {
+ protoreflect.Message
+
+ skipNull bool
+}
+
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if m.Has(fd) || fd.ContainingOneof() != nil {
+ continue // ignore populated fields and fields within a oneof
+ }
+
+ v := m.Get(fd)
+ if fd.HasPresence() {
+ if m.skipNull {
+ continue
+ }
+ v = protoreflect.Value{} // use invalid value to emit null
+ }
+ if !f(fd, v) {
+ return
+ }
+ }
+ m.Message.Range(f)
+}
+
+// marshalMessage marshals the fields in the given protoreflect.Message.
+// If the typeURL is non-empty, then a synthetic "@type" field is injected
+// containing the URL as the value.
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
+ if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
+ return marshal(e, m)
+ }
+
+ e.StartObject()
+ defer e.EndObject()
+
+ var fields order.FieldRanger = m
+ switch {
+ case e.opts.EmitUnpopulated:
+ fields = unpopulatedFieldRanger{Message: m, skipNull: false}
+ case e.opts.EmitDefaultValues:
+ fields = unpopulatedFieldRanger{Message: m, skipNull: true}
+ }
+ if typeURL != "" {
+ fields = typeURLFieldRanger{fields, typeURL}
+ }
+
+ var err error
+ order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ name := fd.JSONName()
+ if e.opts.UseProtoNames {
+ name = fd.TextName()
+ }
+
+ if err = e.WriteName(name); err != nil {
+ return false
+ }
+ if err = e.marshalValue(v, fd); err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
+
+// marshalValue marshals the given protoreflect.Value.
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch {
+ case fd.IsList():
+ return e.marshalList(val.List(), fd)
+ case fd.IsMap():
+ return e.marshalMap(val.Map(), fd)
+ default:
+ return e.marshalSingular(val, fd)
+ }
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ if !val.IsValid() {
+ e.WriteNull()
+ return nil
+ }
+
+ switch kind := fd.Kind(); kind {
+ case protoreflect.BoolKind:
+ e.WriteBool(val.Bool())
+
+ case protoreflect.StringKind:
+ if e.WriteString(val.String()) != nil {
+ return errors.InvalidUTF8(string(fd.FullName()))
+ }
+
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ e.WriteInt(val.Int())
+
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ e.WriteUint(val.Uint())
+
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
+ protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
+ // 64-bit integers are written out as JSON string.
+ e.WriteString(val.String())
+
+ case protoreflect.FloatKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 32)
+
+ case protoreflect.DoubleKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000..e9fe1039 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,880 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
+ if name.Parent() == genid.GoogleProtobuf_package {
+ switch name.Name() {
+ case genid.Any_message_name:
+ return encoder.marshalAny
+ case genid.Timestamp_message_name:
+ return encoder.marshalTimestamp
+ case genid.Duration_message_name:
+ return encoder.marshalDuration
+ case genid.BoolValue_message_name,
+ genid.Int32Value_message_name,
+ genid.Int64Value_message_name,
+ genid.UInt32Value_message_name,
+ genid.UInt64Value_message_name,
+ genid.FloatValue_message_name,
+ genid.DoubleValue_message_name,
+ genid.StringValue_message_name,
+ genid.BytesValue_message_name:
+ return encoder.marshalWrapperType
+ case genid.Struct_message_name:
+ return encoder.marshalStruct
+ case genid.ListValue_message_name:
+ return encoder.marshalListValue
+ case genid.Value_message_name:
+ return encoder.marshalKnownValue
+ case genid.FieldMask_message_name:
+ return encoder.marshalFieldMask
+ case genid.Empty_message_name:
+ return encoder.marshalEmpty
+ }
+ }
+ return nil
+}
+
+type unmarshalFunc func(decoder, protoreflect.Message) error
+
+// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
+ if name.Parent() == genid.GoogleProtobuf_package {
+ switch name.Name() {
+ case genid.Any_message_name:
+ return decoder.unmarshalAny
+ case genid.Timestamp_message_name:
+ return decoder.unmarshalTimestamp
+ case genid.Duration_message_name:
+ return decoder.unmarshalDuration
+ case genid.BoolValue_message_name,
+ genid.Int32Value_message_name,
+ genid.Int64Value_message_name,
+ genid.UInt32Value_message_name,
+ genid.UInt64Value_message_name,
+ genid.FloatValue_message_name,
+ genid.DoubleValue_message_name,
+ genid.StringValue_message_name,
+ genid.BytesValue_message_name:
+ return decoder.unmarshalWrapperType
+ case genid.Struct_message_name:
+ return decoder.unmarshalStruct
+ case genid.ListValue_message_name:
+ return decoder.unmarshalListValue
+ case genid.Value_message_name:
+ return decoder.unmarshalKnownValue
+ case genid.FieldMask_message_name:
+ return decoder.unmarshalFieldMask
+ case genid.Empty_message_name:
+ return decoder.unmarshalEmpty
+ }
+ }
+ return nil
+}
+
+// The JSON representation of an Any message uses the regular representation of
+// the deserialized, embedded message, with an additional field `@type` which
+// contains the type URL. If the embedded message type is well-known and has a
+// custom JSON representation, that representation will be embedded adding a
+// field `value` which holds the custom JSON in addition to the `@type` field.
+
+func (e encoder) marshalAny(m protoreflect.Message) error {
+ fds := m.Descriptor().Fields()
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+ if !m.Has(fdType) {
+ if !m.Has(fdValue) {
+ // If message is empty, marshal out empty JSON object.
+ e.StartObject()
+ e.EndObject()
+ return nil
+ } else {
+ // Return error if type_url field is not set, but value is set.
+ return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
+ }
+ }
+
+ typeVal := m.Get(fdType)
+ valueVal := m.Get(fdValue)
+
+ // Resolve the type in order to unmarshal value field.
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
+ Deterministic: true,
+ }.Marshal(em.Interface())
+ if err != nil {
+ return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
+ }
+
+ fds := m.Descriptor().Fields()
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+ m.Set(fdType, protoreflect.ValueOfString(typeURL))
+ m.Set(fdValue, protoreflect.ValueOfBytes(b))
+ return nil
+}
+
+var errEmptyObject = fmt.Errorf(`empty object`)
+var errMissingType = fmt.Errorf(`missing "@type" field`)
+
+// findTypeURL returns the token for the "@type" field value from the given
+// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
+// It returns errEmptyObject if the JSON object is empty or errMissingType if
+// the @type field does not exist. It returns any other error if the @type
+// field is not valid or if there are other decoding issues.
+func findTypeURL(d decoder) (json.Token, error) {
+ var typeURL string
+ var typeTok json.Token
+ numFields := 0
+ // Skip start object.
+ d.Read()
+
+Loop:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return json.Token{}, err
+ }
+
+ switch tok.Kind() {
+ case json.ObjectClose:
+ if typeURL == "" {
+ // Did not find @type field.
+ if numFields > 0 {
+ return json.Token{}, errMissingType
+ }
+ return json.Token{}, errEmptyObject
+ }
+ break Loop
+
+ case json.Name:
+ numFields++
+ if tok.Name() != "@type" {
+ // Skip value.
+ if err := d.skipJSONValue(); err != nil {
+ return json.Token{}, err
+ }
+ continue
+ }
+
+ // Return error if this was previously set already.
+ if typeURL != "" {
+ return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
+ }
+ // Read field value.
+ tok, err := d.Read()
+ if err != nil {
+ return json.Token{}, err
+ }
+ if tok.Kind() != json.String {
+ return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
+ }
+ typeURL = tok.ParsedString()
+ if typeURL == "" {
+ return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
+ }
+ typeTok = tok
+ }
+ }
+
+ return typeTok, nil
+}
+
+// skipJSONValue parses a JSON value (null, boolean, string, number, object and
+// array) in order to advance the read to the next JSON value. It relies on
+// the decoder returning an error if the types are not in valid sequence.
+func (d decoder) skipJSONValue() error {
+ var open int
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose, json.ArrayClose:
+ open--
+ case json.ObjectOpen, json.ArrayOpen:
+ open++
+ if open > d.opts.RecursionLimit {
+ return errors.New("exceeded max recursion depth")
+ }
+ case json.EOF:
+ // This can only happen if there's a bug in Decoder.Read.
+ // Avoid an infinite loop if this does happen.
+ return errors.New("unexpected EOF")
+ }
+ if open == 0 {
+ return nil
+ }
+ }
+}
+
+// unmarshalAnyValue unmarshals the given custom-type message from the JSON
+// object's "value" field.
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
+ // Skip ObjectOpen, and start reading the fields.
+ d.Read()
+
+ var found bool // Used for detecting duplicate "value".
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ if !found {
+ // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+ // for compatibility with other proto runtimes that have interpreted the spec differently.
+ if m.Descriptor().FullName() != genid.Empty_message_fullname {
+ return d.newError(tok.Pos(), `missing "value" field`)
+ }
+ }
+ return nil
+
+ case json.Name:
+ switch tok.Name() {
+ case "@type":
+ // Skip the value as this was previously parsed already.
+ d.Read()
+
+ case "value":
+ if found {
+ return d.newError(tok.Pos(), `duplicate "value" field`)
+ }
+ // Unmarshal the field value into the given message.
+ if err := unmarshal(d, m); err != nil {
+ return err
+ }
+ found = true
+
+ default:
+ if d.opts.DiscardUnknown {
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+ }
+ }
+}
+
+// Wrapper types are encoded as JSON primitives like string, number or boolean.
+
+func (e encoder) marshalWrapperType(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+ val := m.Get(fd)
+ return e.marshalSingular(val, fd)
+}
+
+func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+ val, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, val)
+ return nil
+}
+
+// The JSON representation for Empty is an empty JSON object.
+
+func (e encoder) marshalEmpty(protoreflect.Message) error {
+ e.StartObject()
+ e.EndObject()
+ return nil
+}
+
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if tok.Kind() != json.ObjectOpen {
+ return d.unexpectedTokenError(tok)
+ }
+
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ return nil
+
+ case json.Name:
+ if d.opts.DiscardUnknown {
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+}
+
+// The JSON representation for Struct is a JSON object that contains the encoded
+// Struct.fields map and follows the serialization rules for a map.
+
+func (e encoder) marshalStruct(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+ return e.marshalMap(m.Get(fd).Map(), fd)
+}
+
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+ return d.unmarshalMap(m.Mutable(fd).Map(), fd)
+}
+
+// The JSON representation for ListValue is a JSON array that contains the
+// encoded ListValue.values repeated field and follows the serialization rules
+// for a repeated field.
+
+func (e encoder) marshalListValue(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+ return e.marshalList(m.Get(fd).List(), fd)
+}
+
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
+ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+ return d.unmarshalList(m.Mutable(fd).List(), fd)
+}
+
+// The JSON representation for a Value is dependent on the oneof field that is
+// set. Each of the fields in the oneof has its own custom serialization rule.
+// A Value message needs to have a oneof field set, else it is an error.
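
Reviewer note (editorial, not part of the vendored diff): the oneof mapping described in the comment above, implemented by marshalKnownValue/unmarshalKnownValue below, can be exercised with a minimal sketch. It assumes the structpb well-known-type helpers from this protobuf module are importable; the map keys are illustrative only.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// structpb.NewValue selects the Value oneof from the Go type:
	// nil -> null_value, numbers -> number_value, string -> string_value,
	// map -> struct_value, slice -> list_value.
	v, err := structpb.NewValue(map[string]interface{}{
		"name":  "demo",
		"count": 2,
		"tags":  []interface{}{"a", "b"},
		"note":  nil,
	})
	if err != nil {
		panic(err)
	}
	b, err := protojson.Marshal(v)
	if err != nil {
		panic(err)
	}
	// Map keys are emitted in sorted order, so the output is deterministic:
	// {"count":2,"name":"demo","note":null,"tags":["a","b"]}
	fmt.Println(string(b))
}
```

Note that marshalKnownValue below rejects NaN and ±Inf in number_value, since a Value has no string form to fall back on.
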
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
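
Reviewer note (editorial, not part of the vendored diff): marshalDuration below always prints nine fractional digits and then trims back to 0, 3, 6, or 9 of them. A minimal sketch of the resulting strings, assuming the durationpb helper package from this protobuf module:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	for _, d := range []time.Duration{
		3 * time.Second,                      // -> "3s"
		1500 * time.Millisecond,              // -> "1.500s"
		-(2*time.Second + 5*time.Nanosecond), // -> "-2.000000005s"
	} {
		b, err := protojson.Marshal(durationpb.New(d))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}
}
```
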
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112b..b5380505 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) 
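
Reviewer note (editorial, not part of the vendored diff): the protojson FieldMask round-trip above (marshalFieldMask/unmarshalFieldMask) is easiest to see with a short sketch, assuming the fieldmaskpb helper package from this protobuf module:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// Paths are stored in snake_case and emitted as one comma-separated,
	// lowerCamelCase JSON string.
	fm := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}
	b, err := protojson.Marshal(fm)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "user.displayName,photo"

	// Unmarshaling converts back to snake_case; a JSON path that already
	// contains "_" is rejected because the round-trip would be lossy.
	var out fieldmaskpb.FieldMask
	if err := protojson.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Paths) // [user.display_name photo]
}
```
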
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. if fd == nil { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 95967e81..1f57e661 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. 
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8..87e46bd4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 18f07568..5a57ef6f 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 00000000..bf1aba0e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,18 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. 
+ MaximumKnown = descriptorpb.Edition_EDITION_2024 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000..ea1d3e65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. 
+ if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. + d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. 
+func (d *Decoder) Position(idx int) (line int, column int) {
+ b := d.orig[:idx]
+ line = bytes.Count(b, []byte("\n")) + 1
+ if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+ b = b[i+1:]
+ }
+ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+ return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+ return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+ if !bytes.HasPrefix(b, []byte(s)) {
+ return 0
+ }
+
+ n := len(s)
+ if n < len(b) && isNotDelim(b[n]) {
+ return 0
+ }
+ return n
+}
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+ return (c == '-' || c == '+' || c == '.' || c == '_' ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ ('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+ d.in = d.in[n:]
+ for len(d.in) > 0 {
+ switch d.in[0] {
+ case ' ', '\n', '\r', '\t':
+ d.in = d.in[1:]
+ default:
+ return
+ }
+ }
+}
+
+// isValueNext returns true if the next type should be a JSON value: Null,
+// Number, String or Bool.
+func (d *Decoder) isValueNext() bool {
+ if len(d.openStack) == 0 {
+ return d.lastToken.kind == 0
+ }
+
+ start := d.openStack[len(d.openStack)-1]
+ switch start {
+ case ObjectOpen:
+ return d.lastToken.kind&Name != 0
+ case ArrayOpen:
+ return d.lastToken.kind&(ArrayOpen|comma) != 0
+ }
+ panic(fmt.Sprintf(
+ "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
+ d.lastToken.kind, start))
+}
+
+// consumeToken constructs a Token for given Kind with raw value derived from
+// current d.in and given size, and consumes the given size-length of it.
+func (d *Decoder) consumeToken(kind Kind, size int) Token {
+ tok := Token{
+ kind: kind,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ }
+ d.consume(size)
+ return tok
+}
+
+// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
+// current d.in and given size.
+func (d *Decoder) consumeBoolToken(b bool, size int) Token {
+ tok := Token{
+ kind: Bool,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ boo: b,
+ }
+ d.consume(size)
+ return tok
+}
+
+// consumeStringToken constructs a Token for a String kind with raw value derived
+// from current d.in and given size.
+func (d *Decoder) consumeStringToken(s string, size int) Token {
+ tok := Token{
+ kind: String,
+ raw: d.in[:size],
+ pos: len(d.orig) - len(d.in),
+ str: s,
+ }
+ d.consume(size)
+ return tok
+}
+
+// Clone returns a copy of the Decoder for use in reading ahead the next JSON
+// object, array or other values without affecting current Decoder.
+func (d *Decoder) Clone() *Decoder {
+ ret := *d
+ ret.openStack = append([]Kind(nil), ret.openStack...)
+ return &ret
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
new file mode 100644
index 00000000..2999d713
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
@@ -0,0 +1,254 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based on the +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. + if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumberParts constructs numberParts from the given []byte. The logic here +// is similar to parseNumber above, with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts.
It will return false if it is not an +integer or if the exponent exceeds the max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. + index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000..f7fea7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
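A worked illustration of the two helpers above (an editorial sketch, not vendored code; both functions are unexported, so this only type-checks inside the package):

parts, _ := parseNumberParts([]byte("1.5e3")) // neg=false, intp="1", frac="5", exp="3"
s, ok := normalizeToIntString(parts)          // "1500", true: fraction folded in, zero-padded
parts, _ = parseNumberParts([]byte("10e-1"))  // intp="10", exp="-1"
s, ok = normalizeToIntString(parts)           // "1", true: the trailing zero shifts out cleanly
parts, _ = parseNumberParts([]byte("1e-1"))   // would shift a non-zero digit out
s, ok = normalizeToIntString(parts)           // "", false: not an integer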
+ +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 00000000..50578d65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. 
+ comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the different accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value if token kind is String, else it +// panics. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. +func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the unsigned integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize.
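Tying the accessors to the number normalization above, a hedged sketch (illustrative only; assumes the package's NewDecoder constructor defined earlier in decode.go):

d := json.NewDecoder([]byte(`1e2`))
tok, _ := d.Read()    // Number token with raw text "1e2"
n, _ := tok.Int(32)   // 100: getIntStr normalizes the E-notation first
u, _ := tok.Uint(64)  // 100
f, _ := tok.Float(64) // 100: strconv.ParseFloat on the raw text
// For raw text "1.5", Int and Uint report ok == false while Float returns 1.5.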
+func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 00000000..934f2dcb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) 
+ for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. +func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. 
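A short usage sketch of the Encoder above (illustrative, not vendored code):

e, _ := json.NewEncoder(nil, "") // no indent: single-line output
e.StartObject()
_ = e.WriteName("id")
e.WriteInt(42)
_ = e.WriteName("ok")
e.WriteBool(true)
e.EndObject()
// string(e.Bytes()) is {"id":42,"ok":true}, except that prepareNext may
// inject one random space after the comma (via detrand) to keep the exact
// byte output deliberately unstable.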
+func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d2083..669133d0 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,12 +26,13 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,11 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. 
tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e78..099b2bf4 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35..c2d6bd52 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. -func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd3492..00000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. 
-func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cd..00000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. -func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 8826bcf4..688aabe4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -18,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -31,6 +31,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -76,31 +77,48 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsLegacyRequired is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). + APILevel int } ) @@ -108,9 +126,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -202,6 +223,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -250,11 +274,7 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -331,8 +351,7 @@ func (fd *Field) HasPresence() bool { if fd.L1.Cardinality == protoreflect.Repeated { return false } - explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence - return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional @@ -345,17 +364,11 @@ func (fd *Field) IsPacked() bool { case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: return false } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsPacked - } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { - // proto3 repeated fields are packed by default. 
- return !fd.L1.HasPacked || fd.L1.IsPacked - } - return fd.L1.IsPacked + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -381,13 +394,12 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -399,13 +411,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsUTF8Validated - } - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -432,13 +438,13 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -461,9 +467,19 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } @@ -542,8 +558,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files can be used to create standalone descriptors // where the syntax is only information derived from the parent file.
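The surrogates declared below are what standalone descriptors, such as the ones built by tag.Unmarshal earlier in this diff, hang off of; a sketch of the intended flow (illustrative only, not vendored code):

f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
// The surrogate now carries resolved proto2 edition features (seeded in
// editions.go below), so feature-driven accessors work without syntax checks:
f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
_ = f.L1.EditionFeatures.IsPacked // false: proto2 defaults to expanded encoding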
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -585,6 +602,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. + if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -605,7 +650,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 237e64fd..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 case "editions": fd.L1.Syntax = protoreflect.Editions default: @@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 } - if fd.L1.Syntax == protoreflect.Editions { - fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) - } + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) // Parse editions features from options if any if options != nil { @@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -467,6 +470,40 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -499,7 +536,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 482a61cc..d4c94458 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. switch fd.L1.Kind { case protoreflect.EnumKind: @@ -45,6 +40,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) 
+ fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. @@ -145,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -466,10 +464,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } - if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { fd.L1.Kind = protoreflect.GroupKind } - if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + if fd.L1.EditionFeatures.IsLegacyRequired { fd.L1.Cardinality = protoreflect.Required } if rawTypeName != nil { @@ -496,13 +494,11 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -548,7 +544,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte - xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -572,7 +567,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -580,12 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } - if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { - xd.L1.Kind = protoreflect.GroupKind - } - if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { - xd.L1.Cardinality = protoreflect.Required - } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -598,32 +586,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) - } - default: - m 
:= protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fd..f4107c05 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 0375a49d..10132c9b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -14,9 +14,13 @@ import ( ) var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} func init() { unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) } func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { @@ -28,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } @@ -64,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } @@ -104,12 +116,15 @@ func unmarshalEditionDefault(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) } } } defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) } func unmarshalEditionDefaults(b []byte) { @@ -135,8 +150,15 @@ func unmarshalEditionDefaults(b []byte) { } func getFeaturesFor(ed Edition) EditionFeatures { - if def, ok := defaultsCache[ed]; ok { - return def + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } +
match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) } - panic(fmt.Sprintf("unsupported edition: %v", ed)) + return defaultsCache[match] } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc..bfb3b841 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4e..e1b4130b 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,12 +63,12 @@ type Builder struct { // message declarations in "flattened ordering". // // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd3..a06ccabc 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 40272c89..f30ab6b5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -21,6 +21,7 @@ const ( // Enum values for google.protobuf.Edition. 
const ( Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 Edition_EDITION_PROTO2_enum_value = 998 Edition_EDITION_PROTO3_enum_value = 999 Edition_EDITION_2023_enum_value = 1000 @@ -653,6 +654,7 @@ const ( FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -667,6 +669,7 @@ const ( FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -684,6 +687,7 @@ const ( FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -767,6 +771,33 @@ const ( FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. 
+const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -829,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -842,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -1110,17 +1144,20 @@ const ( // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" ) // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 ) // Names for google.protobuf.SourceCodeInfo. diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index fd9015e8..f5ee7f5c 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -10,22 +10,61 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" ) -const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. 
+const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e..99bb95ba 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go new file mode 100644 index 00000000..224f3393 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" + + BuilderSuffix_goname = "_builder" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98d..5d5771c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) 
} // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 00000000..6075d6f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[1], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer. +func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresent removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. +func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr. +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 00000000..ea276547 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. 
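Aside: the presence accessors in api_export_opaque.go above reduce to fixed bit arithmetic. Presence index i lives in 32-bit word i/32 at bit i%32, a read is one atomic load plus a mask, and a set loops on compare-and-swap so concurrent writers never drop each other's bits. A minimal, self-contained sketch of that arithmetic follows; the two-word array and the set/present closures are hypothetical stand-ins for a generated XXX_presence field and the Export hooks.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var words [2]uint32 // stand-in for a generated XXX_presence array

	set := func(i uint32) {
		part := &words[i/32] // word holding bit i
		for {
			old := atomic.LoadUint32(part)
			if atomic.CompareAndSwapUint32(part, old, old|(1<<(i%32))) {
				return
			}
		}
	}
	present := func(i uint32) bool {
		return atomic.LoadUint32(&words[i/32])&(1<<(i%32)) > 0
	}

	set(3)
	set(40) // lands in words[1], bit 8
	fmt.Println(present(3), present(40), present(7)) // true true false
}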
+func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 00000000..e9a27583 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies +// shadowPresence bytes from src to lazy. 
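The shadow bitmap above works because the race detector only flags plain memory accesses: the real presence words are always touched atomically, which -race treats as synchronized, so each hook mirrors the operation as an ordinary unsynchronized byte access to the shadow slice, which -race can report. A tiny illustration of that underlying mechanism, independent of protobuf; run it with go run -race to see the report, without the flag it exits quietly.

package main

import "sync"

func main() {
	shadow := make([]byte, 64) // stand-in for the shadowPresence bytes

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			shadow[7] = 1 // unsynchronized concurrent writes: flagged under -race
		}()
	}
	wg.Wait()
}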
+func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041ed..fe2c719c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { @@ -68,7 +101,7 @@ func (mi *MessageInfo) isInitExtensions(ext 
*map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -99,6 +98,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExtensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a lock first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() @@ -136,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -152,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently.
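isUnexpandedLazy and lazyBuffer above follow a classic double-checked pattern: an atomic once-flag guards the fast path, and the mutex is taken only when the value still needs expanding. A minimal sketch of the same shape, with hypothetical names and an int standing in for the decoded message.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyValue struct {
	mu         sync.Mutex
	atomicOnce uint32 // 1 once the value has been decoded
	b          []byte // wire bytes, dropped after expansion
	value      int    // stand-in for the decoded message
}

func (l *lazyValue) get() int {
	if atomic.LoadUint32(&l.atomicOnce) == 1 {
		return l.value // fast path: already expanded, no lock taken
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.atomicOnce == 0 { // re-check under the lock
		l.value = len(l.b) // stand-in for actually decoding l.b
		l.b = nil
		atomic.StoreUint32(&l.atomicOnce, 1)
	}
	return l.value
}

func main() {
	l := &lazyValue{b: []byte("raw wire bytes")}
	fmt.Println(l.get(), l.get()) // decodes once, then hits the fast path
}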
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241..d14d7d93 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -65,6 +62,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } @@ -118,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -233,9 +161,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return 
f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +196,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +346,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +423,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +465,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil } -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = 
mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +533,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +549,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +607,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +695,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 00000000..76818ea2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } 
+ o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func 
appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16..229c6980 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -93,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } @@ -270,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -317,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { 
val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -325,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -345,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -360,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -375,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66e..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..f78b57b0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] @@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -189,6 +199,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 00000000..41c1f74e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
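The deleted codec_map_go111.go and codec_map_go112.go shims existed only to support Go versions predating reflect.Value.MapRange (added in Go 1.12); with the minimum supported Go version long past that, call sites now use the standard iterator directly and avoid materializing a key slice. For reference, the standard API the shim used to wrap:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange() // Go 1.12+; replaces the deleted mapRange shim
	for iter.Next() {
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}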
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
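The opaque coders above guard lazily allocated message and slice pointers with atomic loads and a set-if-nil compare-and-swap (AtomicGetPointer, AtomicSetPointerIfNil). A sketch of that allocate-if-nil idiom, expressed with the standard library's generic atomic.Pointer (Go 1.19+) rather than the package's unsafe-based pointer type:

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct{ n int }

func main() {
	var p atomic.Pointer[node]

	// First CompareAndSwap from nil wins; later callers observe the
	// winner's allocation instead of clobbering it.
	if p.CompareAndSwap(nil, &node{n: 1}) {
		fmt.Println("allocated")
	}
	p.CompareAndSwap(nil, &node{n: 2}) // loses: p is no longer nil
	fmt.Println(p.Load().n)            // 1
}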
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf..7a16ec13 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. 
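The MessageSet fast paths here and in the hunk that follows avoid re-encoding a still-lazy extension: the retained wire buffer already holds <extension tag><length><payload>, so marshaling only swaps the leading tag for the MessageSet payload tag (field 3, messageset.FieldMessage) and copies the rest verbatim, and sizing likewise reuses len(lb) minus the old tag size. A sketch of that re-framing using the public protowire package; the extension number and payload are made up:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const extNum = 1234
	payload := []byte("encoded sub-message") // stand-in for wire bytes

	// What a lazy buffer holds: <extension tag><len><payload>.
	extTag := protowire.EncodeTag(extNum, protowire.BytesType)
	lb := protowire.AppendVarint(nil, extTag)
	lb = protowire.AppendBytes(lb, payload)
	tagsize := protowire.SizeVarint(extTag)

	// Re-frame under field 3 without touching the length or payload.
	b := protowire.AppendVarint(nil, protowire.EncodeTag(3, protowire.BytesType))
	b = append(b, lb[tagsize:]...)
	fmt.Printf("%x\n", b)
}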
+ if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - 
v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. 
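The two hunks above are linked: with the purego and appengine build configurations dropped, the reflection-based coder in codec_reflect.go has no remaining users, so the file is deleted and codec_unsafe.go loses its now-redundant build constraint. A rough, self-contained sketch (hypothetical types, not part of the vendored code) of what the unsafe fast path means by "treat enum values as int32s": the field is read through its address as a raw int32, with no reflect.Value involved.

package main

import (
	"fmt"
	"unsafe"
)

// Color stands in for a generated protobuf enum type (hypothetical).
type Color int32

type msg struct {
	color Color
}

func main() {
	m := &msg{color: 3}
	// Read the enum field as a plain int32 through its address, the way the
	// unsafe coder reads fields via pointer offsets instead of reflection.
	p := (*int32)(unsafe.Pointer(&m.color))
	fmt.Println(*p) // 3
}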
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2ef..f72ddd88 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@ import (
 // unwrapper unwraps the value to the underlying value.
 // This is implemented by List and Map.
 type unwrapper interface {
-	protoUnwrap() interface{}
+	protoUnwrap() any
 }
 
 // A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
 	return protoreflect.ValueOfString(v.Convert(stringType).String())
 }
 func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
-	// pref.Value.String never panics, so we go through an interface
+	// protoreflect.Value.String never panics, so we go through an interface
 	// conversion here to check the type.
 	s := v.Interface().(string)
 	if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index f8913651..18cb96fd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
 func (ls *listReflect) IsValid() bool {
 	return !ls.v.IsNil()
 }
-func (ls *listReflect) protoUnwrap() interface{} {
+func (ls *listReflect) protoUnwrap() any {
 	return ls.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index f30b0a05..e4580b3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
 	return v
 }
 func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
-	iter := mapRange(ms.v)
+	iter := ms.v.MapRange()
 	for iter.Next() {
 		k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
 		v := ms.valConv.PBValueOf(iter.Value())
@@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
 func (ms *mapReflect) IsValid() bool {
 	return !ms.v.IsNil()
 }
-func (ms *mapReflect) protoUnwrap() interface{} {
+func (ms *mapReflect) protoUnwrap() any {
 	return ms.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520c..e0dd21fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 		AllowPartial:   true,
 		DiscardUnknown: o.DiscardUnknown(),
 		Resolver:       o.resolver,
+
+		NoLazyDecoding: o.NoLazyDecoding(),
 	}
 }
 
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
 	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
 }
 
-func (o unmarshalOptions) IsDefault() bool {
-	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+
return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). @@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. 
if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6..b2e21229 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,8 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -49,8 +50,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +64,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -68,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -84,13 +116,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. 
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -128,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -149,6 +225,22 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. +func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +250,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +276,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +298,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+	emx := mi.extensionMap(msx.pointer())
+	emy := mi.extensionMap(msy.pointer())
+	if emx != nil {
+		for k, x := range *emx {
+			xd := x.Type().TypeDescriptor()
+			xv := x.Value()
+			var y ExtensionField
+			ok := false
+			if emy != nil {
+				y, ok = (*emy)[k]
+			}
+			// We need to treat empty lists as equal to nil values
+			if emy == nil || !ok {
+				if xd.IsList() && xv.List().Len() == 0 {
+					continue
+				}
+				return false
+			}
+
+			if !equalValue(xd, xv, y.Value()) {
+				return false
+			}
+		}
+	}
+	if emy != nil {
+		// emy may have extensions emx does not have, need to check them as well
+		for k, y := range *emy {
+			if emx != nil {
+				// emx has the field, so we already checked it
+				if _, ok := (*emx)[k]; ok {
+					continue
+				}
+			}
+			// Empty lists are equal to nil
+			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+				continue
+			}
+
+			// Can't be equal if the extension is populated
+			return false
+		}
+	}
+
+	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+	// slow path
+	if fd.Kind() != protoreflect.MessageKind {
+		return vx.Equal(vy)
+	}
+
+	// fast path special cases
+	if fd.IsMap() {
+		if fd.MapValue().Kind() == protoreflect.MessageKind {
+			return equalMessageMap(vx.Map(), vy.Map())
+		}
+		return vx.Equal(vy)
+	}
+
+	if fd.IsList() {
+		return equalMessageList(vx.List(), vy.List())
+	}
+
+	return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+	if mx.Len() != my.Len() {
+		return false
+	}
+	equal := true
+	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+		if !my.Has(k) {
+			equal = false
+			return false
+		}
+		vy := my.Get(k)
+		equal = equalMessage(vx.Message(), vy.Message())
+		return equal
+	})
+	return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalImpl instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+	if lx.Len() != ly.Len() {
+		return false
+	}
+	for i := 0; i < lx.Len(); i++ {
+		// We only operate on messages here since equalImpl will not call us in any other case.
+		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+			return false
+		}
+	}
+	return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	if bytes.Equal([]byte(x), []byte(y)) {
+		return true
+	}
+
+	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	for len(x) > 0 {
+		fnum, _, n := protowire.ConsumeField(x)
+		mx[fnum] = append(mx[fnum], x[:n]...)
+		x = x[n:]
+	}
+	for len(y) > 0 {
+		fnum, _, n := protowire.ConsumeField(y)
+		my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0ba..e31249f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 00000000..c7de31e2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshalling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. 
+func UnmarshalField(m interface{}, num protowire.Number) {
+	switch m := m.(type) {
+	case *messageState:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	case *messageReflectWrapper:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	default:
+		panic(fmt.Sprintf("unsupported wrapper type %T", m))
+	}
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+	var f *coderFieldInfo
+	if int(num) < len(mi.denseCoderFields) {
+		f = mi.denseCoderFields[num]
+	} else {
+		f = mi.coderFields[num]
+	}
+	if f == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: no field info for %v.%v", mi.Desc.FullName(), num))
+	}
+	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+	if !found && multipleEntries == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+	}
+	// The actual pointer in the message cannot be set until the whole struct is filled in, otherwise we will have races.
+	// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
+	fp := pointerOfValue(reflect.New(f.ft))
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+		}
+	} else {
+		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+	}
+	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+	opts := lazyUnmarshalOptions
+	opts.flags |= flags
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return errors.New("invalid wire data")
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return errors.New("invalid wire data")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+		if num == f.num {
+			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+			if err == nil {
+				b = b[o.n:]
+				continue
+			}
+			if err != errUnknown {
+				return err
+			}
+		}
+		n := protowire.ConsumeFieldValue(num, wtyp, b)
+		if n < 0 {
+			return errors.New("invalid wire data")
+		}
+		b = b[n:]
+	}
+	return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+	fmi := f.validation.mi
+	if fmi == nil {
+		fd := mi.Desc.Fields().ByNumber(f.num)
+		if fd == nil {
+			return out, ValidationUnknown
+		}
+		messageName := fd.Message().FullName()
+		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+		if err != nil {
+			return out, ValidationUnknown
+		}
+		var ok bool
+		fmi, ok = messageType.(*MessageInfo)
+		if !ok {
+			return out, ValidationUnknown
+		}
+	}
+	fmi.init()
+	switch f.validation.typ {
+	case validationTypeMessage:
+		if wtyp != protowire.BytesType {
+			return out, ValidationWrongWireType
+		}
+		v, n := protowire.ConsumeBytes(b)
+		if n < 0 {
+			return out, ValidationInvalid
+		}
+		out, st := fmi.validate(v, 0, opts)
+		out.n = n
+		return out, st
+	case validationTypeGroup:
+		if wtyp != protowire.StartGroupType {
+			return out, ValidationWrongWireType
+		}
+		out, st := fmi.validate(b, f.num, opts)
+		return out, st
+	default:
+		return out, ValidationUnknown
+	}
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshalling. It expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	initialized := true
+	var requiredMask uint64
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	var lazyIndex []protolazy.IndexEntry
+	var lastNum protowire.Number
+	outOfOrder := false
+	lazyDecode := false
+	presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+	if !presence.AnyPresent(mi.presenceSize) {
+		if opts.CanBeLazy() {
+			// If the message contains existing data, we need to merge into it.
+			// Lazy unmarshaling doesn't merge, so only enable it when the
+			// message is empty (has no presence bitmap).
+			lazyDecode = true
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			(*lazy).SetUnmarshalFlags(opts.flags)
+			if !opts.AliasBuffer() {
+				// Make a copy of the buffer for lazy unmarshaling.
+				// Set the AliasBuffer flag so recursive unmarshal
+				// operations reuse the copy.
+				b = append([]byte{}, b...)
+				opts.flags |= piface.UnmarshalAliasBuffer
+			}
+			(*lazy).SetBuffer(b)
+		}
+	}
+	// Track special handling of lazy fields.
+	//
+	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+	// In the event that validation for a field fails, this map tracks handling of the field.
+ type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. + lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. 
+ } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb..81b2b1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
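The new lazy.go above is the heart of the lazy-decoding support: a process-wide flag, on by default and overridable with GOPROTODEBUG=nolazy or EnableLazyUnmarshal, gates whether unmarshalPointerLazy records raw bytes and defers field decoding. A condensed, standalone restatement of just that toggle (local names; the real flag lives in internal/impl and is not importable directly):

package main

import (
	"fmt"
	"os"
	"sync/atomic"
)

// enableLazy mirrors the package-level flag added in lazy.go: it defaults
// to on, and GOPROTODEBUG=nolazy turns it off at process start.
var enableLazy int32 = func() int32 {
	if os.Getenv("GOPROTODEBUG") == "nolazy" {
		return 0
	}
	return 1
}()

// enableLazyUnmarshal flips the same atomic flag at runtime, analogous to
// impl.EnableLazyUnmarshal in the diff above.
func enableLazyUnmarshal(enable bool) {
	var v int32
	if enable {
		v = 1
	}
	atomic.StoreInt32(&enableLazy, v)
}

func main() {
	fmt.Println(atomic.LoadInt32(&enableLazy) != 0) // true unless GOPROTODEBUG=nolazy
	enableLazyUnmarshal(false)
	fmt.Println(atomic.LoadInt32(&enableLazy) != 0) // false
}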
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d05..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab09108..b649f112 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 2ab2c629..a51dffbe 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type methods := make([]reflect.Method, 0, 2) @@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -307,14 +310,11 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -344,6 +344,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -563,6 +564,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f..8ffdce67 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + 
presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdc..d50423dc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -30,12 +29,12 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +46,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -117,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -126,13 +127,14 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -142,9 +144,10 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -157,24 +160,23 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - 
si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } + case "lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f) + case "XXX_presence": + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { @@ -201,7 +203,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: @@ -256,7 +255,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 00000000..dd55e8e0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,627 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. +func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. 
+	if t.Kind() == reflect.Struct && t.NumField() > 0 {
+		pgt := t.Field(0).Tag.Get("protogen")
+		return strings.HasPrefix(pgt, "opaque.")
+	}
+	return false
+}
+
+func opaqueInitHook(mi *MessageInfo) bool {
+	mt := mi.GoReflectType.Elem()
+	si := opaqueStructInfo{
+		structInfo: mi.makeStructInfo(mt),
+	}
+
+	if !isOpaque(mt) {
+		return false
+	}
+
+	defer atomic.StoreUint32(&mi.initDone, 1)
+
+	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+	fds := mi.Desc.Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		fs := si.fieldsByNumber[fd.Number()]
+		var fi fieldInfo
+		usePresence, _ := usePresenceForField(si, fd)
+
+		switch {
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			// Oneofs are no different for opaque.
+			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+		case fd.IsMap():
+			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil && usePresence:
+			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil:
+			// Proto3 lists without presence can use same access methods as open
+			fi = fieldInfoForList(fd, fs, mi.Exporter)
+		case fd.IsList() && usePresence:
+			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+		case fd.IsList():
+			// Proto3 opaque messages that do not need a presence bitmap.
+			// Different representation than open struct, but same logic
+			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+		case fd.Message() != nil && usePresence:
+			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+		case fd.Message() != nil:
+			// Proto3 messages without presence can use same access methods as open
+			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+		default:
+			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+		}
+		mi.fields[fd.Number()] = &fi
+	}
+	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+		od := mi.Desc.Oneofs().Get(i)
+		mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
+	}
+
+	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+	for i := 0; i < fds.Len(); i++ {
+		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+		}
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+			i += od.Fields().Len()
+		} else {
+			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+			i++
+		}
+	}
+
+	mi.makeExtensionFieldsFunc(mt, si.structInfo)
+	mi.makeUnknownFieldsFunc(mt, si.structInfo)
+	mi.makeOpaqueCoderMethods(mt, si)
+	mi.makeFieldTypes(si.structInfo)
+
+	return true
+}
+
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+	oi := &oneofInfo{oneofDesc: od}
+	if od.IsSynthetic() {
+		fd := od.Fields().Get(0)
+		index, _ := presenceIndex(mi.Desc, fd)
+		oi.which = func(p pointer) protoreflect.FieldNumber {
+			if p.IsNil() {
+				return 0
+			}
+			if !mi.present(p, index) {
+				return 0
+			}
+			return od.Fields().Get(0).Number()
+		}
+		return oi
+	}
+	// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+	return makeOneofInfo(od, si, x)
+}
+
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+	}
+	fieldOffset := offsetOf(fs)
+	conv := NewConverter(ft, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			// Don't bother checking presence bits, since we need to
+			// look at the map length even if the presence bit is set.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if v.IsNil() {
+				v.Set(reflect.MakeMap(fs.Type))
+			}
+			return conv.PBValueOf(v)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(reflect.PtrTo(ft), fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			}
+			mi.setPresent(p, index)
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv.Elem())
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			mi.setPresent(p, index)
+			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if !mi.present(p, index) {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			if !mi.present(p, index) {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					sp = p.Apply(fieldOffset).AtomicGetPointer()
+				} else {
+					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+					mi.setPresent(p, index)
+				}
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return false
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if !sp.IsNil() {
+				rv := sp.AsValueOf(fs.Type.Elem())
+				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+			}
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return conv.Zero()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	nullable := fd.HasPresence()
+	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+		nullable = true
+	}
+	deref := false
+	if nullable && ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+		deref = true
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	var getter func(p pointer) protoreflect.Value
+	if !nullable {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+	}
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if nullable {
+				return mi.present(p, index)
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			switch rv.Kind() {
+			case reflect.Bool:
+				return rv.Bool()
+			case reflect.Int32, reflect.Int64:
+				return rv.Int() != 0
+			case reflect.Uint32, reflect.Uint64:
+				return rv.Uint() != 0
+			case reflect.Float32, reflect.Float64:
+				return rv.Float() != 0 || math.Signbit(rv.Float())
+			case reflect.String, reflect.Slice:
+				return rv.Len() > 0
+			default:
+				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+			}
+		},
+		clear: func(p pointer) {
+			if nullable {
+				mi.clearPresent(p, index)
+			}
+			// This is only valuable for bytes and strings, but we do it unconditionally.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if deref {
+				if rv.IsNil() {
+					rv.Set(reflect.New(ft))
+				}
+				rv = rv.Elem()
+			}
+
+			rv.Set(conv.GoValueOf(v))
+			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+				rv.Set(emptyBytes)
+			}
+			if nullable {
+				mi.setPresent(p, index)
+			}
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	elemType := fs.Type.Elem()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			return mi.present(p, index)
+		},
+		clear: func(p pointer) {
+			mi.clearPresent(p, index)
+			p.Apply(fieldOffset).AtomicSetNilPointer()
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				mp = fp.AtomicGetPointer()
+			}
+			rv := mp.AsValueOf(elemType)
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			val := pointerOfValue(conv.GoValueOf(v))
+			if val.IsNil() {
+				panic("invalid nil pointer")
+			}
+			p.Apply(fieldOffset).AtomicSetPointer(val)
+			mi.setPresent(p, index)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					mp = fp.AtomicGetPointer()
+				} else {
+					mp = pointerOfValue(conv.GoValueOf(conv.New()))
+					fp.AtomicSetPointer(mp)
+					mi.setPresent(p, index)
+				}
+			}
+			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+		},
+		newMessage: func() protoreflect.Message {
+			return conv.New().Message()
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+	pvalueList
+	setPresence func(bool)
+}
+type pvalueList interface {
+	protoreflect.List
+	//Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+	list.pvalueList.Append(v)
+	list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+	list.pvalueList.Truncate(i)
+	list.setPresence(i > 0)
+}
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+	found := false
+	var index, numIndices uint32
+	for i := 0; i < md.Fields().Len(); i++ {
+		f := md.Fields().Get(i)
+		if f == fd {
+			found = true
+			index = numIndices
+		}
+		if f.ContainingOneof() == nil || isLastOneofField(f) {
+			numIndices++
+		}
+	}
+	if !found {
+		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+	}
+	return index, presenceSize(numIndices)
+}
+
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+	fields := fd.ContainingOneof().Fields()
+	return fields.Get(fields.Len()-1) == fd
+}
+
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
+
+// usePresenceForField implements the somewhat intricate logic of when
+// the presence bitmap is used for a field. The main logic is that a
+// field that is optional or that can be lazy will use the presence
+// bit, but for proto2, also maps have a presence bit. It also records
+// if the field can ever be lazy, which is true if we have a
+// lazyOffset and the field is a message or a slice of messages. A
+// field that is lazy will always need a presence bit. Oneofs are not
+// lazy and do not use presence, unless they are a synthetic oneof,
+// which is a proto3 optional field. For proto3 optionals, we use the
+// presence and they can also be lazy when applicable (a message).
+func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
+
+	// Non-oneof scalar fields with explicit field presence use the presence array.
+	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		return false, false
+	case fd.IsMap():
+		return false, false
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		return hasLazyField, hasLazyField
+	default:
+		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
+	}
+}
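Note: the opaque implementation above tracks explicit field presence in an array of uint32 words. presenceIndex assigns each field a bit (collapsing every oneof to a single slot), and setPresent/clearPresent/present flip or test that bit. A minimal, self-contained sketch of the same bit-addressing scheme — illustrative names only, not the vendored API:

```go
package main

import "fmt"

// presenceBits models the presence bitmap: bit i lives in word i/32 at
// bit position i%32, so a Has check is one load plus one mask.
type presenceBits []uint32

func newPresenceBits(n int) presenceBits { return make(presenceBits, (n+31)/32) }

func (p presenceBits) set(i uint32)      { p[i/32] |= 1 << (i % 32) }
func (p presenceBits) clear(i uint32)    { p[i/32] &^= 1 << (i % 32) }
func (p presenceBits) has(i uint32) bool { return p[i/32]&(1<<(i%32)) != 0 }

func main() {
	p := newPresenceBits(80)
	p.set(76) // bit 76 lands in word 76/32 = 2, matching the comment in presence.go below
	fmt.Println(p.has(76), p.has(75)) // true false
}
```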
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 00000000..a6982569
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for nullable opaque types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
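Note: the generated getter above exists so that the kind switch runs once, when the fieldInfo is built, and every subsequent Get is a direct pointer load rather than a reflect.Value round trip. A sketch of that "select the closure up front" pattern, with hypothetical names:

```go
package main

import "fmt"

// record stands in for a generated message struct; the accessor is
// resolved once per field, so the per-call path has no type switch.
type record struct {
	id   int32
	name string
}

func getterFor(field string) func(*record) any {
	switch field { // runs once, not on every Get
	case "id":
		return func(r *record) any { return r.id }
	case "name":
		return func(r *record) any { return r.name }
	default:
		return nil
	}
}

func main() {
	get := getterFor("name")
	r := &record{id: 7, name: "alice"}
	fmt.Println(get(r)) // alice
}
```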
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index d9ea010b..0d20132f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -20,7 +20,7 @@ type reflectMessageInfo struct {
 	// fieldTypes contains the zero value of an enum or message field.
 	// For lists, it contains the element type.
 	// For maps, it contains the entry value type.
-	fieldTypes map[protoreflect.FieldNumber]interface{}
+	fieldTypes map[protoreflect.FieldNumber]any

 	// denseFields is a subset of fields where:
 	//	0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@
 	denseFields []*fieldInfo

 	// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
-	rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+	rangeInfos []any // either *fieldInfo or *oneofInfo

 	getUnknown func(pointer) protoreflect.RawFields
 	setUnknown func(pointer, protoreflect.RawFields)
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
 			fi = fieldInfoForMap(fd, fs, mi.Exporter)
 		case fd.IsList():
 			fi = fieldInfoForList(fd, fs, mi.Exporter)
-		case fd.IsWeak():
-			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
 		case fd.Message() != nil:
 			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
 		default:
@@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		case fd.IsList():
 			if fd.Enum() != nil || fd.Message() != nil {
 				ft = fs.Type.Elem()
+
+				if ft.Kind() == reflect.Slice {
+					ft = ft.Elem()
+				}
+
 			}
 			isMessage = fd.Message() != nil
 		case fd.Enum() != nil:
@@ -214,9 +217,6 @@
 			}
 		case fd.Message() != nil:
 			ft = fs.Type
-			if fd.IsWeak() {
-				ft = nil
-			}
 			isMessage = true
 		}
 		if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
@@ -224,7 +224,7 @@
 		}
 		if ft != nil {
 			if mi.fieldTypes == nil {
-				mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
+				mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
 			}
 			mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
 		}
@@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
 		}
 	}
 }
-func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
+func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
 	if m == nil {
 		return false
 	}
-	xd := xt.TypeDescriptor()
 	x, ok := (*m)[int32(xd.Number())]
 	if !ok {
 		return false
 	}
+	if x.isUnexpandedLazy() {
+		// Avoid calling x.Value(), which triggers a lazy unmarshal.
+		return true
+	}
 	switch {
 	case xd.IsList():
 		return x.Value().List().Len() > 0
 	case xd.IsMap():
 		return x.Value().Map().Len() > 0
-	case xd.Message() != nil:
-		return x.Value().Message().IsValid()
 	}
 	return true
 }
-func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
-	delete(*m, int32(xt.TypeDescriptor().Number()))
+func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) {
+	delete(*m, int32(xd.Number()))
 }
-func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
 	if m != nil {
 		if x, ok := (*m)[int32(xd.Number())]; ok {
 			return x.Value()
 		}
 	}
-	return xt.Zero()
+	return xd.Type().Zero()
 }
-func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) {
+	xt := xd.Type()
 	isValid := true
 	switch {
 	case !xt.IsValidValue(v):
@@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
 		isValid = v.Message().IsValid()
 	}
 	if !isValid {
-		panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+		panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName()))
 	}

 	if *m == nil {
@@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
 	x.Set(xt, v)
 	(*m)[int32(xd.Number())] = x
 }
-func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
-	xd := xt.TypeDescriptor()
+func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
 	if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
 		panic("invalid Mutable on field with non-composite type")
 	}
 	if x, ok := (*m)[int32(xd.Number())]; ok {
 		return x.Value()
 	}
-	v := xt.New()
-	m.Set(xt, v)
+	v := xd.Type().New()
+	m.Set(xd, v)
 	return v
 }

@@ -394,7 +393,7 @@ var (
 // MessageOf returns a reflective view over a message. The input must be a
 // pointer to a named Go struct. If the provided type has a ProtoReflect method,
 // it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
+func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
 	if reflect.TypeOf(m) != mi.GoReflectType {
 		panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
 	}
@@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() {
 func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
 	return (*messageReflectWrapper)(m)
 }
-func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+func (m *messageIfaceWrapper) protoUnwrap() any {
 	return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
 }

 // checkField verifies that the provided field descriptor is valid.
 // Exactly one of the returned values is populated.
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) {
+func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) {
 	var fi *fieldInfo
 	if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
 		fi = mi.denseFields[n]
@@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo,
 		if !ok {
 			panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
 		}
-		return nil, xtd.Type()
+		return nil, xtd
 	}
 	panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
 }
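Note: the Has change above is behavioral as well as mechanical — extension presence is now answered without forcing a lazily-stored extension to decode (isUnexpandedLazy short-circuits before x.Value() is called). The general idea, sketched with hypothetical types:

```go
package main

import "fmt"

// lazyField models a field kept as raw wire bytes until first use.
type lazyField struct {
	raw     []byte  // undecoded wire bytes
	decoded *string // populated on first Get
}

// Has answers presence from the raw bytes alone, like the
// isUnexpandedLazy check above: no decode needed just for Has.
func (f *lazyField) Has() bool {
	return f.raw != nil || f.decoded != nil
}

// Get decodes on demand (string conversion stands in for a real unmarshal).
func (f *lazyField) Get() string {
	if f.decoded == nil && f.raw != nil {
		s := string(f.raw)
		f.decoded = &s
	}
	if f.decoded == nil {
		return ""
	}
	return *f.decoded
}

func main() {
	f := &lazyField{raw: []byte("payload")}
	fmt.Println(f.Has()) // true, without decoding
	fmt.Println(f.Get()) // "payload", decoded on demand
}
```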
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b1..68d4ae32 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@ import (
 	"fmt"
 	"math"
 	"reflect"
-	"sync"

-	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
 )

 type fieldInfo struct {
@@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	isMessage := fd.Message() != nil

 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		// NOTE: The logic below intentionally assumes that oneof fields are
 		// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
 	conv := NewConverter(ft, fd)

 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
 	conv := NewConverter(reflect.PtrTo(ft), fd)

 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	ft := fs.Type
 	nullable := fd.HasPresence()
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+	var getter func(p pointer) protoreflect.Value
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
 			// This never occurs for generated message types.
@@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 		}
 	}
 	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+
+	// Generate specialized getter functions to avoid going through reflect.Value
+	if nullable {
+		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	}

-	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
 			if p.IsNil() {
 				return false
 			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable {
-				return !rv.IsNil()
+				return !p.Apply(fieldOffset).Elem().IsNil()
 			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			switch rv.Kind() {
 			case reflect.Bool:
 				return rv.Bool()
@@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(reflect.Zero(rv.Type()))
 		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if nullable {
-				if rv.IsNil() {
-					return conv.Zero()
-				}
-				if rv.Kind() == reflect.Ptr {
-					rv = rv.Elem()
-				}
-			}
-			return conv.PBValueOf(rv)
-		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
 		set: func(p pointer, v protoreflect.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable && rv.Kind() == reflect.Ptr {
@@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	}
 }

-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
-	if !flags.ProtoLegacy {
-		panic("no support for proto1 weak fields")
-	}
-
-	var once sync.Once
-	var messageType protoreflect.MessageType
-	lazyInit := func() {
-		once.Do(func() {
-			messageName := fd.Message().FullName()
-			messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
-			if messageType == nil {
-				panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
-			}
-		})
-	}
-
-	num := fd.Number()
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			_, ok := p.Apply(weakOffset).WeakFields().get(num)
-			return ok
-		},
-		clear: func(p pointer) {
-			p.Apply(weakOffset).WeakFields().clear(num)
-		},
-		get: func(p pointer) protoreflect.Value {
-			lazyInit()
-			if p.IsNil() {
-				return protoreflect.ValueOfMessage(messageType.Zero())
-			}
-			m, ok := p.Apply(weakOffset).WeakFields().get(num)
-			if !ok {
-				return protoreflect.ValueOfMessage(messageType.Zero())
-			}
-			return protoreflect.ValueOfMessage(m.ProtoReflect())
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			lazyInit()
-			m := v.Message()
-			if m.Descriptor() != messageType.Descriptor() {
-				if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
-					panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
-				}
-				panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
-			}
-			p.Apply(weakOffset).WeakFields().set(num, m.Interface())
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			lazyInit()
-			fs := p.Apply(weakOffset).WeakFields()
-			m, ok := fs.get(num)
-			if !ok {
-				m = messageType.New().Interface()
-				fs.set(num, m)
-			}
-			return protoreflect.ValueOfMessage(m.ProtoReflect())
-		},
-		newMessage: func() protoreflect.Message {
-			lazyInit()
-			return messageType.New()
-		},
-		newField: func() protoreflect.Value {
-			lazyInit()
-			return protoreflect.ValueOfMessage(messageType.New())
-		},
-	}
-}
-
 func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
 	ft := fs.Type
 	conv := NewConverter(ft, fd)

 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
 			}
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if fs.Type.Kind() != reflect.Ptr {
-				return !isZero(rv)
+				return !rv.IsZero()
 			}
 			return !rv.IsNil()
 		},
@@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 	oi := &oneofInfo{oneofDesc: od}
 	if od.IsSynthetic() {
 		fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
-		fieldOffset := offsetOf(fs, x)
+		fieldOffset := offsetOf(fs)
 		oi.which = func(p pointer) protoreflect.FieldNumber {
 			if p.IsNil() {
 				return 0
@@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 		}
 	} else {
 		fs := si.oneofsByName[od.Name()]
-		fieldOffset := offsetOf(fs, x)
+		fieldOffset := offsetOf(fs)
 		oi.which = func(p pointer) protoreflect.FieldNumber {
 			if p.IsNil() {
 				return 0
@@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 	}
 	return oi
 }
-
-// isZero is identical to reflect.Value.IsZero.
-// TODO: Remove this when Go1.13 is the minimally supported Go version.
-func isZero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return math.Float64bits(v.Float()) == 0
-	case reflect.Complex64, reflect.Complex128:
-		c := v.Complex()
-		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
-	case reflect.Array:
-		for i := 0; i < v.Len(); i++ {
-			if !isZero(v.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
-		return v.IsNil()
-	case reflect.String:
-		return v.Len() == 0
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	default:
-		panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
-	}
-}
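Note: the implicit-presence has() paths in this patch (see fieldInfoForScalarOpaque earlier) define "set" as "not the zero value", with one subtlety for floats: a field holding negative zero is considered set, because -0.0 is representable on the wire yet compares equal to 0 in Go. The float case in isolation:

```go
package main

import (
	"fmt"
	"math"
)

// hasFloat mirrors the float case of the has() checks above:
// nonzero, or negative zero (detected via the sign bit), counts as set.
func hasFloat(v float64) bool { return v != 0 || math.Signbit(v) }

func main() {
	fmt.Println(hasFloat(0))                    // false: zero value means unset
	fmt.Println(hasFloat(math.Copysign(0, -1))) // true: negative zero is distinguishable
	fmt.Println(hasFloat(1.5))                  // true
}
```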
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 00000000..af5e063a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		elemType := fs.Type.Elem()
+		// Enums for nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+			if rv.IsNil() {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv.Elem())
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).BoolPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBool(**x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt32(**x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint32(**x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt64(**x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint64(**x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat32(**x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat64(**x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				if len(*x) == 0 {
+					return conv.Zero()
+				}
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for non-nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).String()
+				if len(*x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).String()
+			return protoreflect.ValueOfString(*x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
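Note the cross-kind cases at the end of both generated getters above: a protobuf bytes field may be backed by a Go string, and a string field by a []byte, so the getter converts at the boundary (and maps an empty backing string to nil bytes). A minimal illustration of that conversion contract:

```go
package main

import "fmt"

// bytesFromStringField mirrors the BytesKind-over-string case above:
// an empty backing string yields nil bytes rather than an empty slice.
func bytesFromStringField(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return []byte(s)
}

// stringFromBytesField mirrors the StringKind-over-slice case.
func stringFromBytesField(b []byte) string { return string(b) }

func main() {
	fmt.Println(bytesFromStringField(""))           // []
	fmt.Println(bytesFromStringField("ab"))         // [97 98]
	fmt.Println(stringFromBytesField([]byte("hi"))) // hi
}
```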
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
index 741d6e5b..99dc23c6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message {
 func (m *messageState) Interface() protoreflect.ProtoMessage {
 	return m.protoUnwrap().(protoreflect.ProtoMessage)
 }
-func (m *messageState) protoUnwrap() interface{} {
+func (m *messageState) protoUnwrap() any {
 	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
 }
 func (m *messageState) ProtoMethods() *protoiface.Methods {
-	m.messageInfo().init()
-	return &m.messageInfo().methods
+	mi := m.messageInfo()
+	mi.init()
+	return &mi.methods
 }

 // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo {
 }

 func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
-	m.messageInfo().init()
-	for _, ri := range m.messageInfo().rangeInfos {
+	mi := m.messageInfo()
+	mi.init()
+	for _, ri := range mi.rangeInfos {
 		switch ri := ri.(type) {
 		case *fieldInfo:
 			if ri.has(m.pointer()) {
@@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
 			}
 		case *oneofInfo:
 			if n := ri.which(m.pointer()); n > 0 {
-				fi := m.messageInfo().fields[n]
+				fi := mi.fields[n]
 				if !f(fi.fieldDesc, fi.get(m.pointer())) {
 					return
 				}
 			}
 		}
 	}
-	m.messageInfo().extensionMap(m.pointer()).Range(f)
+	mi.extensionMap(m.pointer()).Range(f)
 }
 func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.has(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+		return mi.extensionMap(m.pointer()).Has(xd)
 	}
 }
 func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		fi.clear(m.pointer())
 	} else {
-		m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+		mi.extensionMap(m.pointer()).Clear(xd)
 	}
 }
 func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.get(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+		return mi.extensionMap(m.pointer()).Get(xd)
 	}
 }
 func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		fi.set(m.pointer(), v)
 	} else {
-		m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+		mi.extensionMap(m.pointer()).Set(xd, v)
 	}
 }
 func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.mutable(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+		return mi.extensionMap(m.pointer()).Mutable(xd)
 	}
 }
 func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.newField()
 	} else {
-		return xt.New()
+		return xd.Type().New()
 	}
 }
 func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
-	m.messageInfo().init()
-	if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+	mi := m.messageInfo()
+	mi.init()
+	if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
 		return od.Fields().ByNumber(oi.which(m.pointer()))
 	}
 	panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
 }
 func (m *messageState) GetUnknown() protoreflect.RawFields {
-	m.messageInfo().init()
-	return m.messageInfo().getUnknown(m.pointer())
+	mi := m.messageInfo()
+	mi.init()
+	return mi.getUnknown(m.pointer())
 }
 func (m *messageState) SetUnknown(b protoreflect.RawFields) {
-	m.messageInfo().init()
-	m.messageInfo().setUnknown(m.pointer(), b)
+	mi := m.messageInfo()
+	mi.init()
+	mi.setUnknown(m.pointer(), b)
 }
 func (m *messageState) IsValid() bool {
 	return !m.pointer().IsNil()
@@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
 	}
 	return (*messageIfaceWrapper)(m)
 }
-func (m *messageReflectWrapper) protoUnwrap() interface{} {
+func (m *messageReflectWrapper) protoUnwrap() any {
 	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
 }
 func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
-	m.messageInfo().init()
-	return &m.messageInfo().methods
+	mi := m.messageInfo()
+	mi.init()
+	return &mi.methods
 }

 // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
 }

 func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
-	m.messageInfo().init()
-	for _, ri := range m.messageInfo().rangeInfos {
+	mi := m.messageInfo()
+	mi.init()
+	for _, ri := range mi.rangeInfos {
 		switch ri := ri.(type) {
 		case *fieldInfo:
 			if ri.has(m.pointer()) {
@@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto
 			}
 		case *oneofInfo:
 			if n := ri.which(m.pointer()); n > 0 {
-				fi := m.messageInfo().fields[n]
+				fi := mi.fields[n]
 				if !f(fi.fieldDesc, fi.get(m.pointer())) {
 					return
 				}
 			}
 		}
 	}
-	m.messageInfo().extensionMap(m.pointer()).Range(f)
+	mi.extensionMap(m.pointer()).Range(f)
 }
 func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.has(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+		return mi.extensionMap(m.pointer()).Has(xd)
 	}
 }
 func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		fi.clear(m.pointer())
 	} else {
-		m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+		mi.extensionMap(m.pointer()).Clear(xd)
 	}
 }
 func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.get(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+		return mi.extensionMap(m.pointer()).Get(xd)
 	}
 }
 func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		fi.set(m.pointer(), v)
 	} else {
-		m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+		mi.extensionMap(m.pointer()).Set(xd, v)
 	}
 }
 func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.mutable(m.pointer())
 	} else {
-		return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+		return mi.extensionMap(m.pointer()).Mutable(xd)
 	}
 }
 func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
-	m.messageInfo().init()
-	if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+	mi := m.messageInfo()
+	mi.init()
+	if fi, xd := mi.checkField(fd); fi != nil {
 		return fi.newField()
 	} else {
-		return xt.New()
+		return xd.Type().New()
 	}
 }
 func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
-	m.messageInfo().init()
-	if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+	mi := m.messageInfo()
+	mi.init()
+	if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
 		return od.Fields().ByNumber(oi.which(m.pointer()))
 	}
 	panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
 }
 func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
-	m.messageInfo().init()
-	return m.messageInfo().getUnknown(m.pointer())
+	mi := m.messageInfo()
+	mi.init()
+	return mi.getUnknown(m.pointer())
 }
 func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
-	m.messageInfo().init()
-	m.messageInfo().setUnknown(m.pointer(), b)
+	mi := m.messageInfo()
+	mi.init()
+	mi.setUnknown(m.pointer(), b)
 }
 func (m *messageReflectWrapper) IsValid() bool {
 	return !m.pointer().IsNil()
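Note: the churn in this file is one mechanical refactor, hoisting every repeated m.messageInfo() call into a local mi. Since that lookup can involve an atomic load, caching it in a local removes redundant loads from each reflective operation. The shape of the change, with illustrative types:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type info struct{ name string }

type wrapper struct{ mi atomic.Pointer[info] }

// messageInfo performs an atomic load on every call.
func (w *wrapper) messageInfo() *info { return w.mi.Load() }

// describeTwice caches the lookup in a local, like the mi := m.messageInfo()
// hoisting above: one load, reused for every access in the method.
func (w *wrapper) describeTwice() (string, string) {
	mi := w.messageInfo()
	return mi.name, mi.name
}

func main() {
	var w wrapper
	w.mi.Store(&info{name: "M"})
	a, b := w.describeTwice()
	fmt.Println(a, b)
}
```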
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index 517e9443..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer interface{}
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
-	index  int
-	export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
-	if len(f.Index) != 1 {
-		panic("embedded structs are not supported")
-	}
-	if f.PkgPath == "" {
-		return offset{index: f.Index[0]} // field is already exported
-	}
-	if x == nil {
-		panic("exporter must be provided for unexported field")
-	}
-	return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
-	return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
-	return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
-	return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
-	if f.export != nil {
-		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
-			return pointer{v: v}
-		}
-	}
-	return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
-	if got := p.v.Type().Elem(); got != t {
-		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
-	}
-	return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
-	return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string          { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
-	return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
-	return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
-	// TODO: reconsider this
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
-	sp := p.v.Elem()
-	sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
-	p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
-	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
-	in := p.v.Elem()
-	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
-	reflect.Copy(out, in)
-	p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
-func (ms *messageState) pointer() pointer                 { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
-	once sync.Once
-	m    messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
-	m.once.Do(func() {
-		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
-		m.m.mi = mi
-	})
-	return &m.m
-}
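Note: with the purego/appengine fallback deleted above, only the unsafe implementation remains (see pointer_unsafe.go below): an offset is literally the field's byte offset, and Apply is pointer arithmetic. A standalone sketch of that representation — not the vendored helpers:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// offset is just a byte offset, matching the unsafe implementation's
// "type offset uintptr" and offsetOf returning f.Offset.
type offset uintptr

func offsetOf(f reflect.StructField) offset { return offset(f.Offset) }

// apply advances a base pointer by the offset; unsafe.Add is the
// modern equivalent of the uintptr arithmetic in the vendored code.
func apply(p unsafe.Pointer, o offset) unsafe.Pointer {
	return unsafe.Add(p, int(o))
}

type msg struct {
	a int32
	b string
}

func main() {
	f, _ := reflect.TypeOf(msg{}).FieldByName("b")
	m := msg{a: 1, b: "hello"}
	bp := (*string)(apply(unsafe.Pointer(&m), offsetOf(f)))
	fmt.Println(*bp) // hello
}
```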
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 4b020e31..62f8bf66 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,15 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl

 import (
 	"reflect"
 	"sync/atomic"
 	"unsafe"
+
+	"google.golang.org/protobuf/internal/protolazy"
 )

 const UnsafeEnabled = true
@@ -23,7 +22,7 @@ type Pointer unsafe.Pointer
 type offset uintptr

 // offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
+func offsetOf(f reflect.StructField) offset {
 	return offset(f.Offset)
 }

@@ -50,7 +49,7 @@ func pointerOfValue(v reflect.Value) pointer {
 }

 // pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
+func pointerOfIface(v any) pointer {
 	type ifaceHeader struct {
 		Type unsafe.Pointer
 		Data unsafe.Pointer
@@ -80,7 +79,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {

 // AsIfaceOf treats p as a pointer to an object of type t and returns the value.
 // It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+func (p pointer) AsIfaceOf(t reflect.Type) any {
 	// TODO: Use tricky unsafe magic to directly create ifaceHeader.
 	return p.AsValueOf(t).Interface()
 }

@@ -112,8 +111,14 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p
 func (p pointer) Bytes() *[]byte         { return (*[]byte)(p.p) }
 func (p pointer) BytesPtr() **[]byte     { return (**[]byte)(p.p) }
 func (p pointer) BytesSlice() *[][]byte  { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+	return presence{P: p.p}
+}

 func (p pointer) Elem() pointer {
 	return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 00000000..38aa7b7d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+		return v
+	}
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+		return p
+	}
+	return mi.Get()
+}
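Note: the new opaque helpers above are thin wrappers over the standard initialize-once CAS idiom — AtomicSetPointerIfNil publishes a pointer only if the slot is still nil, and otherwise returns whichever pointer won the race. The same idiom expressed with atomic.Pointer, for illustration:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// setIfNil publishes v only if p is still nil; if another goroutine
// already published, it returns that winner instead.
func setIfNil[T any](p *atomic.Pointer[T], v *T) *T {
	if p.CompareAndSwap(nil, v) {
		return v
	}
	return p.Load()
}

func main() {
	var p atomic.Pointer[int]
	a, b := new(int), new(int)
	*a, *b = 1, 2
	fmt.Println(*setIfNil(&p, a)) // 1: a won the slot
	fmt.Println(*setIfNil(&p, b)) // still 1: b lost the race
}
```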
+ offset := uintptr(num) / (siz * bitsPerByte) * siz + return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset)) +} + +// Present checks for the presence of a specific field number in a presence set. +func (p presence) Present(num uint32) bool { + if p.P == nil { + return false + } + return Export{}.Present(p.toElem(num), num) +} + +// SetPresent adds presence for a specific field number in a presence set. +func (p presence) SetPresent(num uint32, size presenceSize) { + Export{}.SetPresent(p.toElem(num), num, uint32(size)) +} + +// SetPresentUnatomic adds presence for a specific field number in a presence set without using +// atomic operations. Only to be called during unmarshaling. +func (p presence) SetPresentUnatomic(num uint32, size presenceSize) { + Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size)) +} + +// ClearPresent removes presence for a specific field number in a presence set. +func (p presence) ClearPresent(num uint32) { + Export{}.ClearPresent(p.toElem(num), num) +} + +// LoadPresenceCache (together with PresentInCache) allows for a +// cached version of checking for presence without re-reading the word +// for every field. It is optimized for efficiency and assumes no +// simltaneous mutation of the presence set (or at least does not have +// a problem with simultaneous mutation giving inconsistent results). +func (p presence) LoadPresenceCache() (current uint32) { + if p.P == nil { + return 0 + } + return atomic.LoadUint32((*uint32)(p.P)) +} + +// PresentInCache reads presence from a cached word in the presence +// bitmap. It caches up a new word if the bit is outside the +// word. This is for really fast iteration through bitmaps in cases +// where we either know that the bitmap will not be altered, or we +// don't care about inconsistencies caused by simultaneous writes. +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool { + if num/32 != *cachedElement { + o := uintptr(num/32) * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + *current = atomic.LoadUint32(q) + *cachedElement = num / 32 + } + return (*current & (1 << (num % 32))) > 0 +} + +// AnyPresent checks if any field is marked as present in the bitmap. +func (p presence) AnyPresent(size presenceSize) bool { + n := uintptr((size + 31) / 32) + for j := uintptr(0); j < n; j++ { + o := j * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + b := atomic.LoadUint32(q) + if b > 0 { + return true + } + } + return false +} + +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// can not have a struct type containing the array and the +// RaceDetectHookData. instead the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. 
+ +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// cannot have a struct type containing the array and the +// RaceDetectHookData. Instead, the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. +func (p presence) toRaceDetectData() *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o)) +} + +func atomicLoadShadowPresence(p **[]byte) *[]byte { + return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreShadowPresence(p **[]byte, v *[]byte) { + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v)) +} + +// findPointerToRaceDetectData finds the preceding RaceDetectHookData +// in a message by using pointer arithmetic. For the methods called +// directly from generated code, we don't have a pointer to the +// beginning of the presence set, but a pointer inside the array. As +// we know the index of the bit we're manipulating (num), we can +// calculate which element of the array ptr is pointing to. With that +// information we find the preceding RaceDetectHookData and can +// manipulate the shadow bitmap. +// +// This method is only called from the race-detect version of the +// code, so RaceDetectHookData is never an empty struct. +func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0)) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index a24e6bbd..7b2995dd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -37,6 +37,10 @@ const ( // ValidationValid indicates that unmarshaling the message will succeed. ValidationValid + + // ValidationWrongWireType indicates that a validated field does not have + // the expected wire type. + ValidationWrongWireType ) func (v ValidationStatus) String() string { @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } case protoreflect.GroupKind: vi.typ = validationTypeGroup + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } @@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -304,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. -type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e..a1f09162 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go 
b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 00000000..82e5cab4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper code for parsing a protocol buffer. + +package protolazy + +import ( + "errors" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" +) + +// BufferReader is a structure encapsulating a protobuf and a current position. +type BufferReader struct { + Buf []byte + Pos int +} + +// NewBufferReader creates a new BufferReader from a protobuf. +func NewBufferReader(buf []byte) BufferReader { + return BufferReader{Buf: buf, Pos: 0} +} + +var errOutOfBounds = errors.New("protobuf decoding: out of bounds") +var errOverflow = errors.New("proto: integer overflow") + +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) { + i := b.Pos + l := len(b.Buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + v := b.Buf[i] + i++ + x |= (uint64(v) & 0x7F) << shift + if v < 0x80 { + b.Pos = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint decodes a varint at the current position. +func (b *BufferReader) DecodeVarint() (x uint64, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return b.DecodeVarintSlow() + } + + var v uint64 + // we already checked the first byte + x = uint64(buf[i]) & 127 + i++ + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 35 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 42 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 49 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 56 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 63 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// DecodeVarint32 decodes a varint32 at the current position. +func (b *BufferReader) DecodeVarint32() (x uint32, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint32(buf[i]), nil + } else if len(buf)-i < 5 { + v, err := b.DecodeVarintSlow() + return uint32(v), err + } + + var v uint32 + // we already checked the first byte + x = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +}
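Editor's aside: the unrolled decoders above are a speed-optimized form of the basic base-128 varint loop (7 payload bits per byte, LSB-first, high bit set means "more bytes follow"). A runnable reference sketch of that loop:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint is the straightforward loop that the unrolled fast path above
// is equivalent to; it returns the value and the number of bytes consumed.
func decodeVarint(buf []byte) (x uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		v := buf[n]
		n++
		x |= uint64(v&0x7F) << shift
		if v < 0x80 { // high bit clear: this was the last byte
			return x, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	x, n, _ := decodeVarint([]byte{0x96, 0x01}) // 150 encoded in two bytes
	fmt.Println(x, n)                           // 150 2
}
```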
+ +// SkipValue skips a value in the protobuf, based on the specified tag. +func (b *BufferReader) SkipValue(tag uint32) (err error) { + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + err = b.SkipGroup(tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + return +} + +// SkipGroup skips a group with the specified tag. It executes efficiently using a tag stack. +func (b *BufferReader) SkipGroup(tag uint32) (err error) { + tagStack := make([]uint32, 0, 16) + tagStack = append(tagStack, tag) + var n uint32 + for len(tagStack) > 0 { + tag, err = b.DecodeVarint32() + if err != nil { + return err + } + switch protowire.Type(tag & 0x7) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.Skip(8) + case protowire.BytesType: + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + tagStack = append(tagStack, tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + case protowire.EndGroupType: + if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) { + tagStack = tagStack[:len(tagStack)-1] + } else { + err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d", + protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos) + } + } + if err != nil { + return err + } + } + return nil +} + +// SkipVarint efficiently skips a varint. +func (b *BufferReader) SkipVarint() (err error) { + i := b.Pos + + if len(b.Buf)-i < 10 { + // Use DecodeVarintSlow() to check for buffer overflow, but ignore result + if _, err := b.DecodeVarintSlow(); err != nil { + return err + } + return nil + } + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + return errOverflow + +out: + b.Pos = i + 1 + return nil +} + +// Skip skips the specified number of bytes. +func (b *BufferReader) Skip(n int) (err error) { + if len(b.Buf) < b.Pos+n { + return io.ErrUnexpectedEOF + } + b.Pos += n + return +} + +// SkipFixed64 skips a fixed64. +func (b *BufferReader) SkipFixed64() (err error) { + return b.Skip(8) +} + +// SkipFixed32 skips a fixed32. +func (b *BufferReader) SkipFixed32() (err error) { + return b.Skip(4) +} + +// SkipBytes skips a set of bytes. +func (b *BufferReader) SkipBytes() (err error) { + n, err := b.DecodeVarint32() + if err != nil { + return err + } + return b.Skip(int(n)) +} + +// Done returns whether we are at the end of the protobuf. +func (b *BufferReader) Done() bool { + return b.Pos == len(b.Buf) +} + +// Remaining returns how many bytes remain. +func (b *BufferReader) Remaining() int { + return len(b.Buf) - b.Pos +}
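Editor's aside: the hand-rolled reader above mirrors functionality that also exists in the public protowire package. As a sketch under that assumption, the field indexing that lazy.go's buildIndex performs (see the next file) can be expressed with protowire.ConsumeField, including the coalescing of contiguous encodings of the same field:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// entry is an illustrative stand-in for protolazy's IndexEntry.
type entry struct {
	Num        protowire.Number
	Start, End int
	Multiple   bool
}

func buildIndex(b []byte) ([]entry, error) {
	var idx []entry
	for pos := 0; pos < len(b); {
		// ConsumeField consumes one whole field, descending into groups.
		num, _, n := protowire.ConsumeField(b[pos:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		if k := len(idx) - 1; k >= 0 && idx[k].Num == num {
			idx[k].End = pos + n // extend a contiguous run of the same field
			idx[k].Multiple = true
		} else {
			idx = append(idx, entry{Num: num, Start: pos, End: pos + n})
		}
		pos += n
	}
	return idx, nil
}

func main() {
	var b []byte
	b = protowire.AppendTag(b, 5, protowire.VarintType)
	b = protowire.AppendVarint(b, 1)
	b = protowire.AppendTag(b, 5, protowire.VarintType)
	b = protowire.AppendVarint(b, 2)
	idx, err := buildIndex(b)
	fmt.Println(idx, err) // one entry for field 5 spanning both encodings
}
```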
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go new file mode 100644 index 00000000..ff4d4834 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go @@ -0,0 +1,359 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protolazy contains internal data structures for lazy message decoding. +package protolazy + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// IndexEntry is the structure for an index of the fields in a message of a +// proto (not descending to sub-messages). +type IndexEntry struct { + FieldNum uint32 + // first byte of this tag/field + Start uint32 + // first byte after a contiguous sequence of bytes for this tag/field, which could + // include a single encoding of the field, or multiple encodings for the field + End uint32 + // True if this protobuf segment includes multiple encodings of the field + MultipleContiguous bool +} + +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message. +// +// Deprecated: Do not use. This will be deleted in the near future. +type XXX_lazyUnmarshalInfo struct { + // Index of fields and their positions in the protobuf for this + // message. Make index be a pointer to a slice so it can be updated + // atomically. The index pointer is only set once (lazily when/if + // the index is first needed), and must always be SET and LOADED + // ATOMICALLY. + index *[]IndexEntry + // The protobuf associated with this lazily decoded message. It is + // only set during proto.Unmarshal(). It doesn't need to be set and + // loaded atomically, since any simultaneous set (Unmarshal) and read + // (during a get) would already be a race in the app code. + Protobuf []byte + // The flags present when Unmarshal was originally called for this particular message + unmarshalFlags piface.UnmarshalInputFlags +} + +// The Buffer and SetBuffer methods let v2/internal/impl interact with +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle. + +// Buffer returns the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte { + return lazy.Protobuf +} + +// SetBuffer sets the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) { + lazy.Protobuf = b +} + +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags. +// The flags should reflect how Unmarshal was called. +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) { + lazy.unmarshalFlags = f +} + +// UnmarshalFlags returns the original unmarshalInputFlags. +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags { + return lazy.unmarshalFlags +} + +// AllowedPartial returns true if the user originally unmarshaled this message with +// AllowPartial set to true. +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool { + return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0 +} + +func protoFieldNumber(tag uint32) uint32 { + return tag >> 3 +} + +// buildIndex builds an index of the specified protobuf, returning the index +// array and an error.
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + b = append(b, lazy.Protobuf[entry.Start:entry.End]...) + } + return b, true + } + if !found { + return nil, false + } + b = append(b, lazy.Protobuf[start:end]...) + return b, true +} + +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) { + atomicStoreIndex(&lazy.index, &index) +} + +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information +// (including protobuf), returns startOffset/endOffset/found. +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) { + if lazy.Protobuf == nil { + // There is no backing protobuf for this message -- it was made from a builder + return 0, 0, false, false, nil + } + index := atomicLoadIndex(&lazy.index) + if index == nil { + r, err := buildIndex(lazy.Protobuf) + if err != nil { + panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err)) + } + // lazy.index is a pointer to the slice returned by buildIndex + index = &r + atomicStoreIndex(&lazy.index, index) + } + return lookupField(index, fieldNum) +} + +// lookupField returns the offset at which the indicated field starts using +// the index, the offset immediately after the field ends (including all instances of +// a repeated field), and bools indicating if the field was found and if there +// are multiple encodings of the field in the byte range. +// +// To handle the uncommon case where there are repeated encodings for the same +// field which are not consecutive in the protobuf (so we need to return +// multiple start/end offsets), we also return a slice multipleEntries. If +// multipleEntries is non-nil, then multiple entries were found, and the +// values in the slice should be used, rather than start/end/found. +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) { + // The pointer indexp to the index was already loaded atomically. + // The slice is uniquely associated with the pointer, so it doesn't + // need to be loaded atomically. + index := *indexp + for i, entry := range index { + if fieldNum == entry.FieldNum { + if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum { + // Handle the uncommon case where there are + // repeated entries for the same field which + // are not contiguous in the protobuf. + multiple := make([]IndexEntry, 1, 2) + multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous} + i++ + for i < len(index) && index[i].FieldNum == fieldNum { + multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous}) + i++ + } + return 0, 0, false, false, multiple + + } + return entry.Start, entry.End, true, entry.MultipleContiguous, nil + } + if fieldNum < entry.FieldNum { + return 0, 0, false, false, nil + } + } + return 0, 0, false, false, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go new file mode 100644 index 00000000..dc2a64ca --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package protolazy + +import ( + "sync/atomic" + "unsafe" +) + +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry { + return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a50fcfb4..01efc330 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. 
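Editor's aside on the strs changes above: with the purego/appengine fallback file deleted, both remaining strs implementations are unsafe-based, selected only by Go version. A standalone sketch of what the Go ≥ 1.21 variant does with the standard library's unsafe.String/unsafe.Slice (illustrative, with the usual aliasing caveat):

```go
package main

import (
	"fmt"
	"unsafe"
)

// unsafeString returns a string view over b without copying.
// The caller must not mutate b while the string is in use.
func unsafeString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

// unsafeBytes returns a []byte view over s without copying.
// The returned slice must never be written to.
func unsafeBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := []byte("hello")
	s := unsafeString(b)
	fmt.Println(s, len(unsafeBytes(s))) // hello 5
}
```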
const ( Major = 1 - Minor = 33 - Patch = 0 + Minor = 36 + Patch = 5 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b56..4cbf1aea 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -47,10 +46,18 @@ type UnmarshalOptions struct { // RecursionLimit limits how deeply messages may be nested. // If zero, a default limit is applied. RecursionLimit int + + // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by + // default. Lazy decoding only affects submessages (annotated with [lazy = + // true] in the .proto file) within messages that use the Opaque API. + NoLazyDecoding bool } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). + +// See the [UnmarshalOptions] type if you need more control. func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err @@ -102,6 +109,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } + + if !allowPartial { + // This does not affect how current unmarshal functions work, it just allows them + // to record this for the lazy decoding case. + in.Flags |= protoiface.UnmarshalCheckRequired + } + if o.NoLazyDecoding { + in.Flags |= protoiface.UnmarshalNoLazyDecoding + } + out, err = methods.Unmarshal(in) } else { o.RecursionLimit-- @@ -154,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f..f0473c58 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -58,7 +63,8 @@ type MarshalOptions struct { // options (except for UseCachedSize itself). // // 2. The message and all its submessages have not changed in any - // way since the Size call. + // way since the Size call. For lazily decoded messages, accessing + // a message results in decoding the message, which is a change. // // If either of these invariants is violated, // the results are undefined and may include panics or corrupted output.
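Editor's aside: the decode-side option added above and the marshal-side flags plumbing in the next hunks are user-visible through proto.UnmarshalOptions and proto.MarshalOptions. A small sketch exercising both paths; durationpb is just a convenient concrete message type for a runnable example (lazy decoding itself only applies to Opaque API messages with [lazy = true] submessages):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := durationpb.New(3 * time.Second)

	// Deterministic is one of the options folded into MarshalInputFlags;
	// it mainly matters for map-field ordering.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}

	// NoLazyDecoding forces eager decoding of lazy submessages; for a plain
	// message like Duration it is a harmless no-op.
	var d durationpb.Duration
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(b, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 3s
}
```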
@@ -70,7 +76,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +147,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -145,12 +179,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +197,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. 
// It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent @@ -36,7 +39,49 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +93,36 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +// +// The type of the value is dependent on the field type of the extension. 
+// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +149,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45..575d1483 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49..c8675806 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. 
func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { @@ -34,6 +42,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +51,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 00000000..267fd0f1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. +// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields, there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with nil message. 
The above code would need to be translated with help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. +func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index baa0cc62..823dbf3b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,9 @@ package protodesc import ( + "strings" + + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -91,21 +94,27 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 case "editions": f.L1.Syntax = protoreflect.Editions f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. 
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) @@ -114,9 +123,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - if f.L1.Syntax == protoreflect.Editions { - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -125,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) @@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index b3278163..9da34998 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } - if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) - } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -146,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + f.L1.IsLazy = opts.GetLazy() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -163,32 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.StringName.InitJSON(fd.GetJsonName()) } - if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - // We reuse the existing field because the old option `[packed = - // true]` is mutually exclusive with the editions feature. - if canBePacked(fd) { - f.L1.HasPacked = true - f.L1.IsPacked = f.L1.EditionFeatures.IsPacked - } - - // We pretend this option is always explicitly set because the only - // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 - // or to return the appropriate default. - // When using editions we either parse the option or resolve the - // appropriate default here (instead of later when this option is - // requested from the descriptor). - // In proto2/proto3 syntax HasEnforceUTF8 might be false. - f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } - if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind } } return fs, nil @@ -201,12 +181,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } - if parent.Syntax() == protoreflect.Editions { - o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) - } } } return os, nil @@ -220,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) @@ -233,6 +214,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git 
a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca585..ff692436 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,9 +43,14 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { @@ -68,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -90,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -106,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. 
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -124,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -201,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -217,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index e4dcaf87..c343d922 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", 
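Reviewer note: with the isWeak parameter gone, allowUnresolvable is the only remaining path to placeholder descriptors. That flag is exposed publicly as protodesc.FileOptions.AllowUnresolvable; a self-contained sketch (the file, message, and type names are invented for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A proto3 file whose field references a type registered nowhere.
	fd := &descriptorpb.FileDescriptorProto{
		Name:   proto.String("example.proto"),
		Syntax: proto.String("proto3"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("M"),
			Field: []*descriptorpb.FieldDescriptorProto{{
				Name:     proto.String("f"),
				Number:   proto.Int32(1),
				Label:    descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
				Type:     descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum(),
				TypeName: proto.String(".missing.Dependency"),
			}},
		}},
	}

	file, err := protodesc.FileOptions{AllowUnresolvable: true}.New(fd, &protoregistry.Files{})
	if err != nil {
		panic(err)
	}
	field := file.Messages().Get(0).Fields().Get(0)
	// Instead of an error, the unresolved reference became a placeholder.
	fmt.Println(field.Message().FullName(), field.Message().IsPlaceholder())
	// Output: missing.Dependency true
}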
v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
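Reviewer note: the validator now asks IsClosed() rather than comparing syntax, which extends the check to editions files. A quick demonstration against two well-known generated enums, assuming the vendored protobuf version in this diff (which is what adds IsClosed):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	closed := descriptorpb.FieldDescriptorProto_TYPE_GROUP.Descriptor() // declared in a proto2 file
	open := structpb.NullValue_NULL_VALUE.Descriptor()                  // declared in a proto3 file
	fmt.Println(closed.FullName(), "closed:", closed.IsClosed()) // ... closed: true
	fmt.Println(open.FullName(), "closed:", open.IsClosed())     // ... closed: false
}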
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -159,29 +149,26 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -206,26 +193,23 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, 
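Reviewer note: the replacement implicit-presence check leans on FieldDescriptor.HasPresence: proto3 scalars without the optional keyword report no presence, while proto2 fields do. For example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := (&timestamppb.Timestamp{}).ProtoReflect().Descriptor()
	fmt.Println(ts.Fields().ByName("seconds").HasPresence()) // false: proto3 implicit presence

	fdp := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor()
	fmt.Println(fdp.Fields().ByName("name").HasPresence()) // true: proto2 explicit presence
}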
md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -261,19 +245,16 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +290,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. 
-func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() == protoreflect.Proto3: + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 2a6b29d1..697a61b2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,15 +11,11 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" -) - -const ( - SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 - SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} @@ -48,6 +44,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -67,18 +65,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) os.Exit(1) } - fs := defaults.GetDefaults()[0].GetFeatures() + fsed := defaults.GetDefaults()[0] // Using a linear search for now. // Editions are guaranteed to be sorted and thus we could use a binary search. // Given that there are only a handful of editions (with one more per year) // there is not much reason to use a binary search. 
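Reviewer note: checkValidGroup now enforces the legacy naming rules only before edition 2023. Extracted as a standalone sketch (the helper name is mine, and the same-scope check from the patch is omitted):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// legacyGroupNameOK captures the naming half of the pre-editions checks:
// the group's message name must start uppercase and the field name must
// be its lowercased form.
func legacyGroupNameOK(fieldName, messageName string) bool {
	return unicode.IsUpper(rune(messageName[0])) &&
		fieldName == strings.ToLower(messageName)
}

func main() {
	fmt.Println(legacyGroupNameOK("mygroup", "MyGroup"))  // true: accepted pre-2023
	fmt.Println(legacyGroupNameOK("my_field", "MyGroup")) // false: rejected pre-2023, allowed from edition 2023 on
}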
for _, def := range defaults.GetDefaults() { if def.GetEdition() <= edpb { - fs = def.GetFeatures() + fsed = def } else { break } } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) defaultsCache[ed] = fs return fs } @@ -126,10 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. + fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9d6e0542..9b880aa8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) @@ -73,6 +70,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } @@ -153,6 +160,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this 
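Reviewer note: getFeatureSetFor now resolves a defaults entry by cloning its fixed features and merging the overridable ones on top, instead of taking a single features message. The same Clone-then-Merge layering, shown on an ordinary options message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fixed := &descriptorpb.FieldOptions{Deprecated: proto.Bool(true)}
	overridable := &descriptorpb.FieldOptions{Packed: proto.Bool(false)}

	// Clone so the shared "fixed" message is never mutated, then let any
	// field set in the overridable message win.
	merged := proto.Clone(fixed).(*descriptorpb.FieldOptions)
	proto.Merge(merged, overridable)

	fmt.Println(merged.GetDeprecated(), merged.GetPacked()) // true false
}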
type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 00b01fbd..c85bfaa5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. 
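Reviewer note: under editions there is no group or required keyword, so ToFieldDescriptorProto maps those notions back to TYPE_MESSAGE and LABEL_OPTIONAL when converting to proto form. The rewrite, isolated into an illustrative helper (the real code applies it only when field.Syntax() == Editions):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func normalizeForEditions(p *descriptorpb.FieldDescriptorProto) {
	if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
		p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
	}
	if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
		p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}
}

func main() {
	p := &descriptorpb.FieldDescriptorProto{
		Type:  descriptorpb.FieldDescriptorProto_TYPE_GROUP.Enum(),
		Label: descriptorpb.FieldDescriptorProto_LABEL_REQUIRED.Enum(),
	}
	normalizeForEditions(p)
	fmt.Println(p.GetType(), p.GetLabel()) // TYPE_MESSAGE LABEL_OPTIONAL
}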
func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 7dcc2ff0..ea154eec 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -483,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -519,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4..cd7fbc87 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. 
IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be @@ -510,7 +504,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +513,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and @@ -544,6 +538,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06f..a4b78ace 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. // Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 7ced876f..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). 
-type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v interface{}) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() interface{} { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 16030973..9fe83cef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect @@ -45,7 +44,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
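Reviewer note: with the purego/appengine fallback deleted, every build now uses the unsafe-backed Value union, but the observable behavior is unchanged: ValueOf boxes primitives without the heap allocation an `any` would force. A round-trip example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOf(int64(42)) // flat union, no boxing allocation
	fmt.Println(v.Int())                 // 42
	fmt.Println(v.Interface().(int64))   // 42: invariant v == ValueOf(v.Interface())

	s := protoreflect.ValueOfString("hello")
	fmt.Println(s.String()) // hello
}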
-func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +79,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +92,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 43547011..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect @@ -15,7 +14,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +36,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +69,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +80,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52..de177733 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
- descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..28e9e9f0 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. 
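Reviewer note: descsByName is the map behind the registry's name lookups, so the interface{}-to-any switch here is purely cosmetic. For reference, the public lookups this map serves:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers duration.proto globally
)

func main() {
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.FullName()) // google.protobuf.Duration

	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().ParentFile().Path()) // google/protobuf/duration.proto
}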
@@ -166,3 +185,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. +type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb..93df1b56 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. @@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 78624cf6..a5163376 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. @@ -54,6 +55,9 @@ type Edition int32 const ( // A placeholder for an unknown edition value. Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 // Legacy syntax "editions". These pre-date editions, but behave much like // distinct editions. These can't be used to specify the edition of proto // files, but feature definitions must supply proto2/proto3 defaults for @@ -66,7 +70,7 @@ const ( Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. + // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 Edition_EDITION_2_TEST_ONLY Edition = 2 Edition_EDITION_99997_TEST_ONLY Edition = 99997 @@ -82,6 +86,7 @@ const ( var ( Edition_name = map[int32]string{ 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", 998: "EDITION_PROTO2", 999: "EDITION_PROTO3", 1000: "EDITION_2023", @@ -95,6 +100,7 @@ var ( } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, @@ -572,8 +578,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { } // If set to RETENTION_SOURCE, the option will be omitted from the binary. -// Note: as of January 2023, support for this is in progress and does not yet -// have an effect (b/264593489). type FieldOptions_OptionRetention int32 const ( @@ -635,8 +639,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { // This indicates the types of entities that the field may apply to when used // as an option. 
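Reviewer note: EqualInput/EqualOutput let generated messages advertise a fast-path equality method; user code keeps calling proto.Equal, which consults that method when present and falls back to reflection otherwise. For instance:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	fmt.Println(proto.Equal(a, b)) // true: same type and contents

	b.Value = "world"
	fmt.Println(proto.Equal(a, b)) // false
}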
If it is unset, then the field may be freely used as an -// option on any kind of entity. Note: as of January 2023, support for this is -// in progress and does not yet have an effect (b/264593489). +// option on any kind of entity. type FieldOptions_OptionTargetType int32 const ( @@ -1203,20 +1206,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1227,7 +1228,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1251,12 +1252,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { // Describes a complete .proto file. type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. @@ -1281,16 +1279,16 @@ type FileDescriptorProto struct { // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. 
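Reviewer note: EDITION_LEGACY (900) sits below the proto2/proto3 pseudo-editions so feature-support ranges can say "introduced in the infinite past"; only the numeric ordering of these values matters. A small check against the vendored descriptorpb:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	for _, e := range []descriptorpb.Edition{
		descriptorpb.Edition_EDITION_LEGACY, // 900: "infinite past" for feature support
		descriptorpb.Edition_EDITION_PROTO2, // 998
		descriptorpb.Edition_EDITION_PROTO3, // 999
		descriptorpb.Edition_EDITION_2023,   // 1000
	} {
		fmt.Println(int32(e), e) // numeric value, then generated name
	}
}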
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1301,7 +1299,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1409,10 +1407,7 @@ func (x *FileDescriptorProto) GetEdition() Edition { // Describes a message type. type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` @@ -1424,16 +1419,16 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1444,7 +1439,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1530,11 +1525,7 @@ func (x *DescriptorProto) GetReservedName() []string { } type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` // For external users: DO NOT USE. We are in the process of open sourcing @@ -1546,7 +1537,10 @@ type ExtensionRangeOptions struct { // The verification state of the range. // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ExtensionRangeOptions fields. @@ -1556,11 +1550,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1571,7 +1563,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1616,10 +1608,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica // Describes a field within a message. type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` @@ -1671,15 +1660,15 @@ type FieldDescriptorProto struct { // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. 
Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1690,7 +1679,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1784,21 +1773,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool { // Describes a oneof. type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1809,7 +1795,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1840,10 +1826,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` @@ -1853,16 +1836,16 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1873,7 +1856,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1925,22 +1908,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string { // Describes a value within an enum. type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1951,7 +1931,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1989,22 +1969,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2015,7 +1992,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2053,11 +2030,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { // Describes a method of a service. type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` @@ -2067,6 +2041,8 @@ type MethodDescriptorProto struct { ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodDescriptorProto fields. 
@@ -2077,11 +2053,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2092,7 +2066,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2150,11 +2124,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool { } type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards @@ -2177,12 +2147,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2242,6 +2216,9 @@ type FileOptions struct { // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FileOptions fields. 
@@ -2258,11 +2235,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2273,7 +2248,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2437,11 +2412,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { } type MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less @@ -2514,6 +2485,9 @@ type MessageOptions struct { Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MessageOptions fields. @@ -2525,11 +2499,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2540,7 +2512,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2606,17 +2578,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { } type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` + // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is only implemented to support use of // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release -- sorry, we'll try to include - // other types in a future version! + // type "bytes" in the open source release. 
+ // TODO: make ctype actually deprecated. Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2679,9 +2648,13 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FieldOptions fields. @@ -2697,11 +2670,9 @@ const ( func (x *FieldOptions) Reset() { *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2712,7 +2683,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2811,6 +2782,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet { return nil } +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2819,24 +2797,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { } type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2847,7 +2822,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2877,11 +2852,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set this option to true to allow mapping different tag names to the same // value. AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` @@ -2903,6 +2874,9 @@ type EnumOptions struct { Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumOptions fields. @@ -2912,11 +2886,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2927,7 +2899,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2979,11 +2951,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, @@ -2995,8 +2963,13 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. 
DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumValueOptions fields. @@ -3007,11 +2980,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3022,7 +2993,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3058,6 +3029,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3066,11 +3044,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { } type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? @@ -3080,6 +3054,9 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ServiceOptions fields. 
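Beyond the mechanical rewrite, FieldOptions and EnumValueOptions both gain a feature_support field of the new FieldOptions_FeatureSupport message (defined further down in this diff; it takes message-type index 28, which is why every later file_google_protobuf_descriptor_proto_msgTypes index shifts up by one). A small sketch of reading the support window through the new getters, again assuming the vendored descriptorpb from this update:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.EnumValueOptions{
		FeatureSupport: &descriptorpb.FieldOptions_FeatureSupport{
			EditionIntroduced: descriptorpb.Edition_EDITION_2023.Enum(),
			EditionDeprecated: descriptorpb.Edition_EDITION_2024.Enum(),
		},
	}
	// GetFeatureSupport is nil-safe; the Edition getters fall back to
	// Edition_EDITION_UNKNOWN when the field is unset.
	if fs := opts.GetFeatureSupport(); fs != nil {
		fmt.Println(fs.GetEditionIntroduced(), fs.GetEditionDeprecated())
	}
}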
@@ -3089,11 +3066,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3104,7 +3079,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3141,11 +3116,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { } type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, @@ -3156,6 +3127,9 @@ type MethodOptions struct { Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodOptions fields. @@ -3166,11 +3140,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3181,7 +3153,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3231,11 +3203,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` @@ -3244,15 +3213,15 @@ type UninterpretedOption struct { DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3263,7 +3232,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3334,26 +3303,23 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3364,7 +3330,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3426,10 +3392,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { // feature resolution. The resolution with this object becomes a simple search // for the closest matching edition, followed by proto merges. type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` // The minimum supported edition (inclusive) when this was constructed. // Editions before this will not have defaults. @@ -3437,15 +3400,15 @@ type FeatureSetDefaults struct { // The maximum known edition (inclusive) when this was constructed. Editions // after this will not have reliable defaults. MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3456,7 +3419,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3495,10 +3458,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition { // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar @@ -3547,16 +3507,17 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3567,7 +3528,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3593,22 +3554,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3619,7 +3577,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3642,22 +3600,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { } type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3668,7 +3623,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3708,21 +3663,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3733,7 +3685,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3763,10 +3715,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { } type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The extension number declared within the extension range. Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` // The fully-qualified name of the extension field. There must be a leading @@ -3782,16 +3731,16 @@ type ExtensionRangeOptions_Declaration struct { Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` // If true, indicates that the extension must be defined as repeated. // Otherwise the extension must be defined as optional. 
- Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3802,7 +3751,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3859,21 +3808,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { // is inclusive such that it can appropriately represent the entire int32 // domain. type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3884,7 +3830,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3914,21 +3860,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { } type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+ sizeCache protoimpl.SizeCache } func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3939,7 +3882,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3968,27 +3911,103 @@ func (x *FieldOptions_EditionDefault) GetValue() string { return "" } +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. + EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents // "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -3998,8 +4017,8 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4033,21 +4052,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { // the defaults at the closest matching edition ordered at or before it should // be used. This field must be in strict ascending order by edition. type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. 
+ OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4057,8 +4076,8 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4080,18 +4099,22 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { return Edition_EDITION_UNKNOWN } -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { if x != nil { - return x.Features + return x.OverridableFeatures } return nil } -type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures + } + return nil +} +type SourceCodeInfo_Location struct { + state protoimpl.MessageState `protogen:"open.v1"` // Identifies which part of the FileDescriptorProto was defined at this // location. 
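This hunk is the one caller-visible break in the file: FeatureSetDefaults_FeatureSetEditionDefault drops its features field (tag 2) in favor of overridable_features (tag 4) and fixed_features (tag 5), so GetFeatures() no longer exists. Code that previously read the single FeatureSet has to combine the two halves itself. A hedged migration sketch; the merge order follows the "fixed defaults, then overridable defaults on top" reading of the field comments, not a documented API:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolvedFeatures approximates the old GetFeatures() view by merging the
// fixed defaults with the overridable ones. Sketch only, not the canonical
// edition-resolution algorithm.
func resolvedFeatures(d *descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault) *descriptorpb.FeatureSet {
	fs := &descriptorpb.FeatureSet{}
	if fixed := d.GetFixedFeatures(); fixed != nil {
		proto.Merge(fs, fixed)
	}
	if overridable := d.GetOverridableFeatures(); overridable != nil {
		proto.Merge(fs, overridable)
	}
	return fs
}

func main() {
	d := &descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault{
		Edition:             descriptorpb.Edition_EDITION_2023.Enum(),
		FixedFeatures:       &descriptorpb.FeatureSet{EnumType: descriptorpb.FeatureSet_OPEN.Enum()},
		OverridableFeatures: &descriptorpb.FeatureSet{FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum()},
	}
	fmt.Println(resolvedFeatures(d))
}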
// @@ -4183,15 +4206,15 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4201,8 +4224,8 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4253,10 +4276,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { } type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -4268,17 +4288,17 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4288,8 +4308,8 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4341,323 +4361,499 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 
0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, - 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, - 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, - 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, - 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 
0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, - 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, + 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, + 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 
0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, - 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 
0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, - 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, + 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 
0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, + 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, + 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, + 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, + 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, + 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, + 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 
0x1f, 0x0a, 0x0b, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, + 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, + 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, + 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, + 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, - 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, - 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, - 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x49, 0x4e, 0x54, 0x36, 0x34, 
0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, - 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, - 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 
0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, + 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, + 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, + 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, + 0x20, 0x01, 
0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, + 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 
0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 
0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, - 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, - 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, - 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, - 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 
0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, + 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x01, 0x20, 
0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, + 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, + 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 
0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, + 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, + 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 
0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, + 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, + 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, + 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 
0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, @@ -4665,288 +4861,145 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 
0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 
0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 
0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 
0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, - 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, + 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, - 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, - 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, - 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, - 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, - 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 
0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, + 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x61, 
0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, + 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, + 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, + 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, - 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 
0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, + 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, + 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, + 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, + 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, @@ -4962,130 +5015,139 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 
0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, - 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, - 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, - 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, - 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, - 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, - 0x32, 0x0a, 0x07, 
0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, - 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 
0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, + 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, + 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, + 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, + 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, + 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, + 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, - 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 
0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, - 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, - 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, - 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, - 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, - 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} + 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 
0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, + 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, + 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, + 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5131,10 +5193,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto @@ -5179,40 +5242,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 44: 
google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet - 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 71, // [71:71] is the sub-list for method output_type - 71, // [71:71] is the sub-list for method input_type - 71, // [71:71] is the sub-list for extension type_name - 71, // [71:71] is the sub-list for extension extendee - 0, // [0:71] is the sub-list for field type_name + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet 
+ 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> 
google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5220,419 +5289,13 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, - NumMessages: 32, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, @@ -5642,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git 
a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 25de5ae0..28d24bad 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -6,9 +6,9 @@ // https://developers.google.com/open-source/licenses/bsd // Code generated by protoc-gen-go. DO NOT EDIT. -// source: reflect/protodesc/proto/go_features.proto +// source: google/protobuf/go_features.proto -package proto +package gofeaturespb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -16,24 +16,153 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GoFeatures_APILevel int32 + +const ( + // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, + // but needs to be a separate value to distinguish between + // an explicitly set api level or a missing api level. + GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 + GoFeatures_API_OPEN GoFeatures_APILevel = 1 + GoFeatures_API_HYBRID GoFeatures_APILevel = 2 + GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 +) + +// Enum value maps for GoFeatures_APILevel. +var ( + GoFeatures_APILevel_name = map[int32]string{ + 0: "API_LEVEL_UNSPECIFIED", + 1: "API_OPEN", + 2: "API_HYBRID", + 3: "API_OPAQUE", + } + GoFeatures_APILevel_value = map[string]int32{ + "API_LEVEL_UNSPECIFIED": 0, + "API_OPEN": 1, + "API_HYBRID": 2, + "API_OPAQUE": 3, + } +) + +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { + p := new(GoFeatures_APILevel) + *p = x + return p +} + +func (x GoFeatures_APILevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_APILevel) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_APILevel(num) + return nil +} + +// Deprecated: Use GoFeatures_APILevel.Descriptor instead. +func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. 
+var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[1] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} + +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} +} + +type GoFeatures struct { + state protoimpl.MessageState `protogen:"open.v1"` // Whether or not to generate the deprecated UnmarshalJSON method for enums. + // Can only be true for proto using the Open Struct api. LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + // One of OPEN, HYBRID or OPAQUE. + ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -43,8 +172,8 @@ func (x *GoFeatures) String() string { func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -56,7 +185,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message { // Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. 
func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} } func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { @@ -66,112 +195,151 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } -var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { + if x != nil && x.ApiLevel != nil { + return *x.ApiLevel + } + return GoFeatures_API_LEVEL_UNSPECIFIED +} + +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), ExtensionType: (*GoFeatures)(nil), Field: 1002, - Name: "google.protobuf.go", + Name: "pb.go", Tag: "bytes,1002,opt,name=go", - Filename: "reflect/protodesc/proto/go_features.proto", + Filename: "google/protobuf/go_features.proto", }, } // Extension fields to descriptorpb.FeatureSet. var ( - // optional google.protobuf.GoFeatures go = 1002; - E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] ) -var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor - -var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, - 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, - 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, - 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, - 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, - 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, + 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, + 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, + 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 
0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, + 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, + 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, -} + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +}) var ( - file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once - file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData []byte ) -func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { - file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { - file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) - return file_reflect_protodesc_proto_go_features_proto_rawDescData + return file_google_protobuf_go_features_proto_rawDescData } -var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ - (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + 
(GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel + (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 2: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet } -var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ - 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet - 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel + 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet + 2, // 3: pb.go:type_name -> pb.GoFeatures + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } -func init() { file_reflect_protodesc_proto_go_features_proto_init() } -func file_reflect_protodesc_proto_go_features_proto_init() { - if File_reflect_protodesc_proto_go_features_proto != nil { +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, - NumEnums: 0, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), + NumEnums: 2, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, - GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, - DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, - MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, - ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() - File_reflect_protodesc_proto_go_features_proto = out.File - file_reflect_protodesc_proto_go_features_proto_rawDesc = nil - file_reflect_protodesc_proto_go_features_proto_goTypes = nil - file_reflect_protodesc_proto_go_features_proto_depIdxs = nil + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto 
b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto deleted file mode 100644 index d2465712..00000000 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto +++ /dev/null @@ -1,28 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -syntax = "proto2"; - -package google.protobuf; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/protobuf/types/gofeaturespb"; - -extend google.protobuf.FeatureSet { - optional GoFeatures go = 1002; -} - -message GoFeatures { - // Whether or not to generate the deprecated UnmarshalJSON method for enums. - optional bool legacy_unmarshal_json_enum = 1 [ - retention = RETENTION_RUNTIME, - targets = TARGET_TYPE_ENUM, - edition_defaults = { edition: EDITION_PROTO2, value: "true" }, - edition_defaults = { edition: EDITION_PROTO3, value: "false" } - ]; -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be5..497da66e 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -210,10 +211,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +242,9 @@ type Any struct { // used with implementation specific semantics. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. 
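A note on the mechanical change that recurs throughout these generated files: protobuf-go now emits each raw descriptor as a string (string([]byte{...})) rather than a []byte, so the data can sit in read-only memory, and reads it back through unsafe.Slice(unsafe.StringData(...)), a zero-copy string-to-byte-slice view available since Go 1.20. Below is a minimal sketch of that pattern, assuming the returned slice is treated as strictly read-only; the helper name bytesOf is ours, not part of the generated code.

package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a []byte view over s without copying. Writing through the
// result would mutate string memory, so callers must treat it as read-only.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	rawDesc := string([]byte{0x0a, 0x19, 0x67, 0x6f}) // same shape as the generated descriptors
	b := bytesOf(rawDesc)
	fmt.Println(len(b), b[3]) // 4 111
}

This also appears to be why the generated init functions drop their trailing rawDesc = nil cleanup in these hunks: the descriptor is now a string that continues to back the unsafe views handed to the TypeBuilder and to CompressGZIP, so it has to stay reachable.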
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -414,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -430,22 +428,22 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -461,25 +459,11 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -490,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil 
file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 00000000..193880d1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,357 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// # Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. 
+// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// # Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" + unsafe "unsafe" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +type Duration struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. 
Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. +func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
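// A minimal usage sketch (not part of the generated file): validate, then
// convert, as the package documentation above recommends:
//
//	d := durationpb.New(90 * time.Minute) // Seconds: 5400, Nanos: 0
//	if err := d.CheckValid(); err != nil {
//		// nil, out-of-range, or sign-mismatched Durations land here
//	}
//	goDur := d.AsDuration() // 1h30m0s; on overflow AsDuration saturates
//	                        // to math.MaxInt64/MinInt64 instead of wrapping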
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData []byte +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []any{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 00000000..ecdd31ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,791 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// # Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by any, map[string]any, and []any. 
+// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. +// +// In order to convert the any, map[string]any, and []any +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// # Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]any{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]any{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []any{ +// map[string]any{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]any{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []any{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +package structpb + +import ( + base64 "encoding/base64" + json "encoding/json" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" + unsafe "unsafe" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. +var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. 
+type Struct struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]any) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. +func (x *Struct) AsMap() map[string]any { + f := x.GetFields() + vs := make(map[string]any, len(f)) + for k, v := range f { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. +func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// NewValue constructs a Value from a general-purpose Go interface. 
+// +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. +func NewValue(v any) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]any: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []any: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. 
+// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() any { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. +func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. 
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []any) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []any { + vals := x.GetValues() + vs := make([]any, len(vals)) + for i, v := range vals { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
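// A minimal usage sketch (not part of the generated file): Value round-trips
// JSON-shaped Go data; per the NewValue conversion table, every number comes
// back as a float64:
//
//	v, err := structpb.NewValue(map[string]any{"enabled": true, "retries": 3})
//	if err != nil {
//		// invalid UTF-8 strings and unsupported Go types land here
//	}
//	m := v.GetStructValue().AsMap() // map[string]any{"enabled": true, "retries": float64(3)}
//	b, _ := v.MarshalJSON()         // the JSON equivalent, e.g. {"enabled":true,"retries":3}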
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +}) + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData []byte +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []any{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type 
x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 00000000..00ac835c --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,366 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// # Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// # Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" + unsafe "unsafe" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +// ) to obtain a formatter capable of generating timestamps in this format. +type Timestamp struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
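// A minimal usage sketch (not part of the generated file): construct,
// validate, and convert, as the package documentation above describes:
//
//	ts := timestamppb.Now() // or timestamppb.New(t) for an existing time.Time
//	if err := ts.CheckValid(); err != nil {
//		// nil, before 0001-01-01, after 9999-12-31, or out-of-range nanos
//	}
//	t := ts.AsTime() // time.Time in UTC; AsTime itself never fails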
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. +func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 
0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData []byte +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []any{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|&{tU|gG(Cy=+c:u:/p#~["4!nADK diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml deleted file mode 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml +++ /dev/null merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md deleted file mode 100644 index b877f412..00000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# go-jose v2 - -Version 2 of this library is no longer supported. [Please use v4 -instead](https://pkg.go.dev/github.com/go-jose/go-jose/v4). diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 026be9e3..47ec9466 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -486,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex.
 	// file holds writer for each of the log types.
-	file [severity.NumSeverity]flushSyncWriter
+	file [severity.NumSeverity]io.Writer
 	// flushInterval is the interval for periodic flushing. If zero,
 	// the global default will be used.
 	flushInterval time.Duration
@@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
 	buffer.PutBuffer(b)
 }
 
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
-	w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
-	return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
-	return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
-	return rb.w.Write(bytes)
-}
-
 // SetOutput sets the output destination for all severities
 func SetOutput(w io.Writer) {
 	logging.mu.Lock()
 	defer logging.mu.Unlock()
 	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
-		rb := &redirectBuffer{
-			w: w,
-		}
-		logging.file[s] = rb
+		logging.file[s] = w
 	}
 }
 
@@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
 	if !ok {
 		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
 	}
-	rb := &redirectBuffer{
-		w: w,
-	}
-	logging.file[sev] = rb
+	logging.file[sev] = w
 }
 
 // LogToStderr sets whether to log exclusively to stderr, bypassing outputs
@@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) {
 		logExitFunc(err)
 		return
 	}
-	l.flushAll()
+	needToSync := l.flushAll()
+	l.syncAll(needToSync)
 	OsExit(2)
 }
 
@@ -1028,10 +999,6 @@ type syncBuffer struct {
 	maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
 }
 
-func (sb *syncBuffer) Sync() error {
-	return sb.file.Sync()
-}
-
 // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
 func CalculateMaxSize() uint64 {
 	if logging.logFile != "" {
@@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
 // lockAndFlushAll is like flushAll but locks l.mu first.
 func (l *loggingT) lockAndFlushAll() {
 	l.mu.Lock()
-	l.flushAll()
+	needToSync := l.flushAll()
 	l.mu.Unlock()
+	// Some environments are slow when syncing and holding the lock might cause contention.
+	l.syncAll(needToSync)
 }
 
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// flushAll flushes all the logs
 // l.mu is held.
-func (l *loggingT) flushAll() {
+//
+// The result is the number of files which need to be synced and the pointers to them.
+func (l *loggingT) flushAll() fileArray {
+	var needToSync fileArray
+
 	// Flush from fatal down, in case there's trouble flushing.
 	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
 		file := l.file[s]
-		if file != nil {
-			_ = file.Flush() // ignore error
-			_ = file.Sync() // ignore error
+		if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
+			_ = sb.Flush() // ignore error
+			needToSync.files[needToSync.num] = sb.file
+			needToSync.num++
 		}
 	}
 	if logging.loggerOptions.flush != nil {
 		logging.loggerOptions.flush()
 	}
+	return needToSync
+}
+
+type fileArray struct {
+	num   int
+	files [severity.NumSeverity]*os.File
+}
+
+// syncAll attempts to "sync" their data to disk.
+func (l *loggingT) syncAll(needToSync fileArray) {
+	// Flush from fatal down, in case there's trouble flushing.
+	for i := 0; i < needToSync.num; i++ {
+		_ = needToSync.files[i].Sync() // ignore error
+	}
 }
 
 // CopyStandardLogTo arranges for messages written to the Go "log" package's
diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go
index e4e740ca..e0811e83 100644
--- a/vendor/k8s.io/utils/integer/integer.go
+++ b/vendor/k8s.io/utils/integer/integer.go
@@ -16,6 +16,8 @@ limitations under the License.
 
 package integer
 
+import "math"
+
 // IntMax returns the maximum of the params
 func IntMax(a, b int) int {
 	if b > a {
@@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 {
 }
 
 // RoundToInt32 rounds floats into integer numbers.
+// Deprecated: use math.Round() and a cast directly.
 func RoundToInt32(a float64) int32 {
-	if a < 0 {
-		return int32(a - 0.5)
-	}
-	return int32(a + 0.5)
+	return int32(math.Round(a))
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1d7879f8..48cec40e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -41,8 +41,8 @@ github.com/cloudflare/circl/math/mlsbset
 github.com/cloudflare/circl/sign
 github.com/cloudflare/circl/sign/ed25519
 github.com/cloudflare/circl/sign/ed448
-# github.com/containerd/stargz-snapshotter/estargz v0.14.3
-## explicit; go 1.19
+# github.com/containerd/stargz-snapshotter/estargz v0.16.3
+## explicit; go 1.22.0
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46
@@ -57,7 +57,7 @@ github.com/digitorus/pkcs7
 # github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7
 ## explicit; go 1.16
 github.com/digitorus/timestamp
-# github.com/docker/cli v24.0.7+incompatible
+# github.com/docker/cli v27.5.0+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
@@ -66,10 +66,7 @@ github.com/docker/cli/cli/config/types
 # github.com/docker/distribution v2.8.3+incompatible
 ## explicit
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v27.1.2+incompatible
-## explicit
-github.com/docker/docker/pkg/homedir
-# github.com/docker/docker-credential-helpers v0.8.0
+# github.com/docker/docker-credential-helpers v0.8.2
 ## explicit; go 1.19
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
@@ -80,14 +77,20 @@ github.com/dustin/go-humanize
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
-# github.com/fsnotify/fsnotify v1.7.0
+# github.com/fsnotify/fsnotify v1.8.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
+github.com/fsnotify/fsnotify/internal
 # github.com/go-chi/chi v4.1.2+incompatible
 ## explicit
 github.com/go-chi/chi
 github.com/go-chi/chi/middleware
-# github.com/go-logr/logr v1.4.1
+# github.com/go-jose/go-jose/v4 v4.0.4
+## explicit; go 1.21
+github.com/go-jose/go-jose/v4
+github.com/go-jose/go-jose/v4/cipher
+github.com/go-jose/go-jose/v4/json
+# github.com/go-logr/logr v1.4.2
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -149,11 +152,16 @@ github.com/golang/protobuf/proto
 # github.com/golang/snappy v0.0.4
 ## explicit
 github.com/golang/snappy
-# github.com/google/certificate-transparency-go v1.1.8
-## explicit; go 1.21
+# github.com/google/certificate-transparency-go v1.3.1
+## explicit; go 1.22.0
 github.com/google/certificate-transparency-go
 github.com/google/certificate-transparency-go/asn1
+github.com/google/certificate-transparency-go/client
+github.com/google/certificate-transparency-go/client/configpb
+github.com/google/certificate-transparency-go/ctutil
 github.com/google/certificate-transparency-go/gossip/minimal/x509ext
+github.com/google/certificate-transparency-go/jsonclient
+github.com/google/certificate-transparency-go/loglist3
 github.com/google/certificate-transparency-go/tls
 github.com/google/certificate-transparency-go/x509
 github.com/google/certificate-transparency-go/x509/pkix
@@ -165,8 +173,8 @@ github.com/google/gnostic-models/extensions
 github.com/google/gnostic-models/jsonschema
 github.com/google/gnostic-models/openapiv2
 github.com/google/gnostic-models/openapiv3
-# github.com/google/go-containerregistry v0.19.1
-## explicit; go 1.18
+# github.com/google/go-containerregistry v0.20.3
+## explicit; go 1.23.0
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
 github.com/google/go-containerregistry/internal/estargz
@@ -226,6 +234,9 @@ github.com/hashicorp/hcl/json/token
 # github.com/imdario/mergo v0.3.16
 ## explicit; go 1.13
 github.com/imdario/mergo
+# github.com/in-toto/attestation v1.1.0
+## explicit; go 1.20
+github.com/in-toto/attestation/go/v1
 # github.com/in-toto/in-toto-golang v0.9.0
 ## explicit; go 1.20
 github.com/in-toto/in-toto-golang/in_toto
@@ -245,8 +256,8 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.4
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -254,15 +265,15 @@ github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491
-## explicit; go 1.21
+# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec
+## explicit; go 1.22.0
 github.com/letsencrypt/boulder/core
 github.com/letsencrypt/boulder/goodkey
 github.com/letsencrypt/boulder/identifier
 github.com/letsencrypt/boulder/probs
 github.com/letsencrypt/boulder/revocation
 github.com/letsencrypt/boulder/strictyaml
-# github.com/magiconair/properties v1.8.7
+# github.com/magiconair/properties v1.8.9
 ## explicit; go 1.19
 github.com/magiconair/properties
 # github.com/mailru/easyjson v0.7.7
@@ -276,7 +287,7 @@ github.com/miekg/pkcs11
 # github.com/mitchellh/go-homedir v1.1.0
 ## explicit
 github.com/mitchellh/go-homedir
-# github.com/mitchellh/mapstructure v1.5.0
+# github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@@ -306,8 +317,8 @@ github.com/opencontainers/image-spec/specs-go/v1
 github.com/opentracing/opentracing-go
 github.com/opentracing/opentracing-go/ext
 github.com/opentracing/opentracing-go/log
-# github.com/pelletier/go-toml/v2 v2.1.0
-## explicit; go 1.16
+# github.com/pelletier/go-toml/v2 v2.2.3
+## explicit; go 1.21.0
 github.com/pelletier/go-toml/v2
 github.com/pelletier/go-toml/v2/internal/characters
 github.com/pelletier/go-toml/v2/internal/danger
@@ -326,7 +337,7 @@ github.com/sagikazarmark/slog-shim
 ## explicit
 github.com/sassoftware/relic/lib/pkcs7
 github.com/sassoftware/relic/lib/x509tools
-# github.com/secure-systems-lab/go-securesystemslib v0.8.0
+# github.com/secure-systems-lab/go-securesystemslib v0.9.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/cjson
 github.com/secure-systems-lab/go-securesystemslib/dsse
@@ -335,8 +346,8 @@ github.com/secure-systems-lab/go-securesystemslib/signerverifier
 # github.com/shibumi/go-pathspec v1.3.0
 ## explicit; go 1.17
 github.com/shibumi/go-pathspec
-# github.com/sigstore/cosign/v2 v2.2.4
-## explicit; go 1.21
+# github.com/sigstore/cosign/v2 v2.4.3
+## explicit; go 1.23.4
 github.com/sigstore/cosign/v2/internal/pkg/cosign
 github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size
 github.com/sigstore/cosign/v2/internal/pkg/now
@@ -362,8 +373,15 @@ github.com/sigstore/cosign/v2/pkg/oci/signed
 github.com/sigstore/cosign/v2/pkg/oci/static
 github.com/sigstore/cosign/v2/pkg/signature
 github.com/sigstore/cosign/v2/pkg/types
-# github.com/sigstore/rekor v1.3.6
-## explicit; go 1.21
+# github.com/sigstore/protobuf-specs v0.4.0
+## explicit; go 1.22.0
+github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1
+github.com/sigstore/protobuf-specs/gen/pb-go/common/v1
+github.com/sigstore/protobuf-specs/gen/pb-go/dsse
+github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1
+github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1
+# github.com/sigstore/rekor v1.3.9
+## explicit; go 1.22.0
 github.com/sigstore/rekor/pkg/client
 github.com/sigstore/rekor/pkg/generated/client
 github.com/sigstore/rekor/pkg/generated/client/entries
@@ -380,6 +398,7 @@ github.com/sigstore/rekor/pkg/pki/pkcs7
 github.com/sigstore/rekor/pkg/pki/ssh
 github.com/sigstore/rekor/pkg/pki/tuf
 github.com/sigstore/rekor/pkg/pki/x509
+github.com/sigstore/rekor/pkg/tle
 github.com/sigstore/rekor/pkg/types
 github.com/sigstore/rekor/pkg/types/dsse
 github.com/sigstore/rekor/pkg/types/dsse/v0.0.1
@@ -391,17 +410,30 @@ github.com/sigstore/rekor/pkg/types/intoto/v0.0.2
 github.com/sigstore/rekor/pkg/types/rekord
 github.com/sigstore/rekor/pkg/types/rekord/v0.0.1
 github.com/sigstore/rekor/pkg/util
-# github.com/sigstore/sigstore v1.8.3
-## explicit; go 1.20
+github.com/sigstore/rekor/pkg/verify
+# github.com/sigstore/sigstore v1.8.15
+## explicit; go 1.22.0
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/signature
 github.com/sigstore/sigstore/pkg/signature/dsse
 github.com/sigstore/sigstore/pkg/signature/kms
+github.com/sigstore/sigstore/pkg/signature/kms/cliplugin
+github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common
+github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding
+github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/internal/signerverifier
 github.com/sigstore/sigstore/pkg/signature/options
 github.com/sigstore/sigstore/pkg/signature/payload
 github.com/sigstore/sigstore/pkg/tuf
-# github.com/sigstore/timestamp-authority v1.2.2
-## explicit; go 1.21
+# github.com/sigstore/sigstore-go v0.7.0
+## explicit; go 1.22.0
+github.com/sigstore/sigstore-go/pkg/fulcio/certificate
+github.com/sigstore/sigstore-go/pkg/root
+github.com/sigstore/sigstore-go/pkg/tlog
+github.com/sigstore/sigstore-go/pkg/tuf
+github.com/sigstore/sigstore-go/pkg/util
+github.com/sigstore/sigstore-go/pkg/verify
+# github.com/sigstore/timestamp-authority v1.2.4
+## explicit; go 1.22.0
 github.com/sigstore/timestamp-authority/pkg/verification
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
@@ -417,17 +449,17 @@ github.com/sourcegraph/conc/panics
 github.com/spf13/afero
 github.com/spf13/afero/internal/common
 github.com/spf13/afero/mem
-# github.com/spf13/cast v1.6.0
+# github.com/spf13/cast v1.7.0
 ## explicit; go 1.19
 github.com/spf13/cast
-# github.com/spf13/cobra v1.8.0
+# github.com/spf13/cobra v1.9.1
 ## explicit; go 1.15
 github.com/spf13/cobra
-# github.com/spf13/pflag v1.0.5
+# github.com/spf13/pflag v1.0.6
 ## explicit; go 1.12
 github.com/spf13/pflag
-# github.com/spf13/viper v1.18.2
-## explicit; go 1.18
+# github.com/spf13/viper v1.19.0
+## explicit; go 1.20
 github.com/spf13/viper
 github.com/spf13/viper/internal/encoding
 github.com/spf13/viper/internal/encoding/dotenv
@@ -473,6 +505,13 @@ github.com/theupdateframework/go-tuf/pkg/targets
 github.com/theupdateframework/go-tuf/sign
 github.com/theupdateframework/go-tuf/util
 github.com/theupdateframework/go-tuf/verify
+# github.com/theupdateframework/go-tuf/v2 v2.0.2
+## explicit; go 1.21
+github.com/theupdateframework/go-tuf/v2/metadata
+github.com/theupdateframework/go-tuf/v2/metadata/config
+github.com/theupdateframework/go-tuf/v2/metadata/fetcher
+github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata
+github.com/theupdateframework/go-tuf/v2/metadata/updater
 # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
 ## explicit
 github.com/titanous/rocacheck
@@ -482,12 +521,12 @@ github.com/transparency-dev/merkle
 github.com/transparency-dev/merkle/compact
 github.com/transparency-dev/merkle/proof
 github.com/transparency-dev/merkle/rfc6962
-# github.com/vbatts/tar-split v0.11.5
+# github.com/vbatts/tar-split v0.11.6
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
-# github.com/xanzy/go-gitlab v0.102.0
-## explicit; go 1.19
-github.com/xanzy/go-gitlab
+# gitlab.com/gitlab-org/api/client-go v0.123.0
+## explicit; go 1.22
+gitlab.com/gitlab-org/api/client-go
 # go.mongodb.org/mongo-driver v1.14.0
 ## explicit; go 1.18
 go.mongodb.org/mongo-driver/bson
@@ -497,8 +536,12 @@ go.mongodb.org/mongo-driver/bson/bsonrw
 go.mongodb.org/mongo-driver/bson/bsontype
 go.mongodb.org/mongo-driver/bson/primitive
 go.mongodb.org/mongo-driver/x/bsonx/bsoncore
-# go.opentelemetry.io/otel v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/auto/sdk v1.1.0
+## explicit; go 1.22.0
+go.opentelemetry.io/auto/sdk
+go.opentelemetry.io/auto/sdk/internal/telemetry
+# go.opentelemetry.io/otel v1.34.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -511,14 +554,16 @@ go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/internal/v2
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
-# go.opentelemetry.io/otel/metric v1.24.0
-## explicit; go 1.20
+go.opentelemetry.io/otel/semconv/v1.26.0
+# go.opentelemetry.io/otel/metric v1.34.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/trace v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel/trace v1.34.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/noop
 # go.uber.org/multierr v1.11.0
 ## explicit; go 1.19
 go.uber.org/multierr
@@ -533,7 +578,7 @@ go.uber.org/zap/internal/exit
 go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.31.0
+# golang.org/x/crypto v0.33.0
 ## explicit; go 1.20
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
@@ -563,40 +608,41 @@ golang.org/x/crypto/sha3
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/terminal
-# golang.org/x/exp v0.0.0-20231108232855-2478ac86f678
-## explicit; go 1.20
+# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
+## explicit; go 1.22.0
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
 golang.org/x/exp/slog
 golang.org/x/exp/slog/internal
 golang.org/x/exp/slog/internal/buffer
-# golang.org/x/mod v0.17.0
-## explicit; go 1.18
+# golang.org/x/mod v0.22.0
+## explicit; go 1.22.0
 golang.org/x/mod/sumdb/note
-# golang.org/x/net v0.33.0
+# golang.org/x/net v0.35.0
 ## explicit; go 1.18
+golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
-# golang.org/x/oauth2 v0.19.0
+golang.org/x/net/internal/httpcommon
+# golang.org/x/oauth2 v0.26.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.10.0
+# golang.org/x/sync v0.11.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.28.0
+# golang.org/x/sys v0.30.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
-golang.org/x/sys/execabs
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.27.0
+# golang.org/x/term v0.29.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.21.0
+# golang.org/x/text v0.22.0
 ## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/internal
@@ -608,18 +654,25 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.5.0
+# golang.org/x/time v0.10.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# google.golang.org/protobuf v1.33.0
-## explicit; go 1.17
+# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
+## explicit; go 1.22
+google.golang.org/genproto/googleapis/api
+google.golang.org/genproto/googleapis/api/annotations
+# google.golang.org/protobuf v1.36.5
+## explicit; go 1.21
+google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
 google.golang.org/protobuf/internal/descopts
 google.golang.org/protobuf/internal/detrand
 google.golang.org/protobuf/internal/editiondefaults
+google.golang.org/protobuf/internal/editionssupport
 google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text
@@ -631,6 +684,7 @@ google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
 google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version
@@ -643,11 +697,9 @@ google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
 google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
-# gopkg.in/go-jose/go-jose.v2 v2.6.3
-## explicit
-gopkg.in/go-jose/go-jose.v2
-gopkg.in/go-jose/go-jose.v2/cipher
-gopkg.in/go-jose/go-jose.v2/json
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/structpb
+google.golang.org/protobuf/types/known/timestamppb
 # gopkg.in/inf.v0 v0.9.1
 ## explicit
 gopkg.in/inf.v0
@@ -888,7 +940,7 @@ k8s.io/client-go/util/flowcontrol
 k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/workqueue
-# k8s.io/klog/v2 v2.120.1
+# k8s.io/klog/v2 v2.130.1
 ## explicit; go 1.18
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer
@@ -908,7 +960,7 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/spec3
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+# k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
 ## explicit; go 1.18
 k8s.io/utils/clock
 k8s.io/utils/clock/testing